# Package setup and data loading.
# lmerTest: mixed models with Satterthwaite p-values; ggeffects: marginal
# predictions; report/r2glmm: model reporting and semi-partial R^2.
library(lmerTest)
library(ggeffects)
library(dplyr)
library(report)
# package 'report' was built under R version 4.0.5  (console message, not code)
library(r2glmm)

# Cleaned train/test data produced by the Cleaning pipeline
fullTest <- read.csv("../Cleaning/output/fullTest.csv")
fullTrain <- read.csv("../Cleaning/output/fullTrain.csv")

# Merge trait base rates into the test data and derive propCorr:
# for "Underestimators" the normatively correct option is the complement
# of the group-level choice proportion.
traitsFreqs <- read.csv("../Cleaning/output/traitFreqOverUnder.csv")
traitsFreqs <- rename(traitsFreqs, props = optionChoiceN)
fullTest <- merge(fullTest, traitsFreqs[c("trait", "props")], by = "trait")
fullTest$propCorr <- ifelse(fullTest$Estimator == "Underestimator",
                            1 - fullTest$props, fullTest$props)

uSubs <- unique(fullTest$subID)

# One row per subject for individual-difference analyses
# (taken before factor conversion / scaling, as in the original ordering)
indDiffs <- fullTest[!duplicated(fullTest$subID), ]

fullTest$ingChoiceN <- as.factor(fullTest$ingChoiceN)
fullTest$novel <- as.factor(fullTest$novel)

# Z-score the continuous predictors; scale() keeps its matrix class,
# matching the original column-by-column assignments.
zCols <- c("selfResp", "SE", "iSE", "oSE", "predicted", "slope",
           "entropy", "WSR", "neighAveOutSE", "neighAveAllSE",
           "neighAveInSE")
for (col in zCols) {
  fullTest[[paste0(col, ".Z")]] <- scale(fullTest[[col]])
}

# Relabel the novelty factor (redundant second as.factor() call removed)
levels(fullTest$novel) <- list("Trained"  = "0", "Held Out" = "1")
# prop.test(traitsFreqs$optionChoiceN, traitsFreqs$N, p=rep(.5,length(traitsFreqs$N)))
# 
# m <- glmer( ingChoiceN ~ trait + ( 1 | subID) , data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
#                                     optCtrl = list(maxfun = 100000)),
#     nAGQ = 1)
# 
# fullTest$trait <- as.factor(fullTest$trait)
# contrasts(fullTest$trait) <- contr.sum(148)
# m <- glm(optionChoiceN ~ trait, family = binomial,
#           data = fullTest
#          )
# summary(m)
summary(m)

Call:
glm(formula = optionChoiceN ~ 1, family = binomial, data = traitsFreqs)

Deviance Residuals: 
     Min        1Q    Median        3Q       Max  
-0.81807  -0.21571   0.02476   0.21563   0.82291  

Coefficients:
            Estimate Std. Error z value Pr(>|z|)
(Intercept)   0.1913     0.1652   1.158    0.247

(Dispersion parameter for binomial family taken to be 1)

    Null deviance: 13.212  on 147  degrees of freedom
Residual deviance: 13.212  on 147  degrees of freedom
AIC: 203.17

Number of Fisher Scoring iterations: 3
# Per-trait one-sample t-tests of ingroup-choice rate against chance (.50).
# Hard-coded 148 replaced with the number of traits in traitsFreqs.
nTraits <- nrow(traitsFreqs)
propMatrix <- matrix(nrow = nTraits, ncol = 7)
for (i in seq_len(nTraits)) {
  traitDf <- subset(fullTest, Idx == i)
  # ingChoiceN is a factor; as.numeric(...) - 1 recovers the 0/1 coding
  test <- t.test(as.numeric(traitDf$ingChoiceN) - 1, mu = .50)
  propMatrix[i, ] <- c(i, test$statistic, test$p.value, test$conf.int,
                       test$estimate, test$parameter)
}
colnames(propMatrix) <- c("Idx", "stat", "p", "LCI", "UCI", "est", "param")
propMatrix <- as.data.frame(propMatrix)
# NOTE(review): assumes traitsFreqs rows are ordered by Idx so trait names
# align with row order — confirm against the Cleaning output.
propMatrix$trait <- traitsFreqs$trait
propMatrix <- propMatrix[order(propMatrix$p), ]
library(corrr)

# Correlate each individual-difference measure (DS through SING.Inter)
# with group homophily, sorted by correlation strength.
x <- indDiffs %>%
  select(groupHomoph, DS:SING.Inter) %>%
  correlate() %>%
  focus(groupHomoph) %>%
  arrange(groupHomoph)

# Correlation method: 'pearson'                         (console output)
# Missing treated using: 'pairwise.complete.obs'        (console output)

# Bar plot of correlations, ordered by strength. The theme() calls are
# kept in the original order: later calls override only the properties
# they set (e.g. axis.text.x angle 90 is superseded by angle 45).
x %>%
  mutate(rowname = factor(rowname, levels = rowname[order(groupHomoph)])) %>%
  ggplot(aes(x = rowname, y = groupHomoph)) +
  geom_bar(stat = "identity") +
  ylab("Correlation Coefficient") +
  xlab("Individual Differences") +
  theme_grey(base_size = 9) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  theme(axis.text.x = element_text(size = 9, angle = 45, vjust = 1)) +
  theme(axis.title.x = element_text(vjust = 1.9)) +
  theme(axis.text = element_text(size = 9),
        axis.title = element_text(size = 9, face = "bold")) +
  theme(legend.text = element_text(size = 9)) +
  theme(panel.border = element_rect(colour = "black", fill = NA, size = 1)) +
  theme(legend.title = element_blank()) +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black"))

Familiarity predicts Reaction Time

summary(m)
Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
Formula: log(RT) ~ fam + propCorr + desirability + (fam | subID) + (1 |      trait)
   Data: fullTest

REML criterion at convergence: 11321

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-8.3287 -0.5140 -0.0934  0.4175  7.9841 

Random effects:
 Groups   Name        Variance  Std.Dev. Corr
 trait    (Intercept) 8.900e-03 0.094340     
 subID    (Intercept) 4.609e-01 0.678881     
          fam         2.238e-05 0.004731 1.00
 Residual             7.821e-01 0.884369     
Number of obs: 4292, groups:  trait, 148; subID, 29

Fixed effects:
               Estimate Std. Error         df t value Pr(>|t|)  
(Intercept)   1.742e-01  2.418e-01  1.360e+02   0.720   0.4727  
fam           6.877e-03  3.544e-03  1.169e+02   1.940   0.0548 .
propCorr      8.663e-02  8.951e-02  4.223e+03   0.968   0.3332  
desirability -2.723e-02  3.800e-02  1.468e+02  -0.717   0.4747  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) fam    prpCrr
fam          0.172              
propCorr    -0.175  0.030       
desirabilty -0.776 -0.393 -0.023
optimizer (nloptwrap) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')

Replication of prior self-anchoring findings: Self-evaluations predicting ingroup evaluations

summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ selfResp.Z + propCorr + desirability + (selfResp.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  3050.2   3097.0  -1517.1   3034.2     2548 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.2611 -0.8119  0.4028  0.7411  4.3393 

Random effects:
 Groups Name        Variance Std.Dev. Corr
 trait  (Intercept) 0.00000  0.0000       
 subID  (Intercept) 0.47448  0.6888       
        selfResp.Z  0.09608  0.3100   0.61
Number of obs: 2556, groups:  trait, 148; subID, 29

Fixed effects:
             Estimate Std. Error z value Pr(>|z|)    
(Intercept)  -1.45628    0.62618  -2.326   0.0200 *  
selfResp.Z    0.27625    0.07804   3.540   0.0004 ***
propCorr      5.00392    0.32287  15.498   <2e-16 ***
desirability -0.12952    0.10317  -1.255   0.2093    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) slfR.Z prpCrr
selfResp.Z   0.183              
propCorr    -0.204  0.052       
desirabilty -0.942 -0.106 -0.054
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')

Does similarity-weighted self-evaluation average predict ingroup choices?

summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ WSR.Z + propCorr + desirability + (WSR.Z | subID) +      (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  5030.1   5080.9  -2507.1   5014.1     4213 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-5.6590 -0.7921  0.3796  0.7276  3.6805 

Random effects:
 Groups Name        Variance Std.Dev. Corr
 trait  (Intercept) 0.000    0.000        
 subID  (Intercept) 5.967    2.443        
        WSR.Z       6.912    2.629    0.40
Number of obs: 4221, groups:  trait, 148; subID, 29

Fixed effects:
             Estimate Std. Error z value Pr(>|z|)    
(Intercept)  -1.99361    0.69564  -2.866  0.00416 ** 
WSR.Z         0.74295    0.54509   1.363  0.17288    
propCorr      4.83641    0.26082  18.543  < 2e-16 ***
desirability -0.11835    0.08155  -1.451  0.14672    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) WSR.Z  prpCrr
WSR.Z        0.289              
propCorr    -0.158  0.020       
desirabilty -0.669 -0.087 -0.053
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')

Does self-evaluation weighted similarity predict ingroup choices?

Do cross-validated similarity*self-evaluation predictions predict ingroup choices?

Do cross-validated similarity*self-evaluation predictions predict ingroup choices, regardless of whether it was seen prior or not?

# Does the cross-validated prediction generalize to held-out traits?
# Logistic mixed model: prediction x novelty, with by-subject random
# slopes for both.
m <- glmer(
  ingChoiceN ~ predicted.Z * novel + (predicted.Z + novel | subID),
  data    = fullTest,
  family  = binomial,
  control = glmerControl(optimizer = "bobyqa",
                         optCtrl = list(maxfun = 100000)),
  nAGQ    = 1
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ predicted.Z * novel + (predicted.Z + novel | subID)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4836.3   4898.4  -2408.2   4816.3     3680 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.2577 -1.0435  0.6154  0.8642  2.1657 

Random effects:
 Groups Name          Variance Std.Dev. Corr       
 subID  (Intercept)   0.297398 0.54534             
        predicted.Z   0.015375 0.12400  -0.95      
        novelHeld Out 0.004533 0.06733   0.10  0.22
Number of obs: 3690, groups:  subID, 29

Fixed effects:
                           Estimate Std. Error z value Pr(>|z|)   
(Intercept)                0.309950   0.116300   2.665   0.0077 **
predicted.Z                0.259248   0.107170   2.419   0.0156 * 
novelHeld Out              0.002235   0.075330   0.030   0.9763   
predicted.Z:novelHeld Out -0.053348   0.077558  -0.688   0.4915   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) prdc.Z nvlHlO
predicted.Z -0.321              
novelHeldOt -0.308  0.087       
prdctd.Z:HO  0.067 -0.289 -0.043
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Odds-ratio table (exponentiated fixed effects with CIs)
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")
# Semi-partial R^2 per fixed effect (r2glmm)
r2beta(m)
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
                     Effect   Rsq upper.CL lower.CL
1                     Model 0.012    0.021    0.007
2               predicted.Z 0.008    0.015    0.004
4 predicted.Z:novelHeld Out 0.000    0.002    0.000
3             novelHeld Out 0.000    0.001    0.000
ggpredict(m, c("predicted.Z", "novel")) %>% plot(show.title=F)+ xlab("Cross-Validated Self-Descriptiveness") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()  + scale_color_discrete(labels = c("Trained","Held-Out"))
Data were 'prettified'. Consider using `terms="predicted.Z [all]"` to get smooth plots.
Scale for 'colour' is already present. Adding another scale for 'colour', which will replace the existing scale.
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/CVpredictionGeneralize.tiff",dpi=600)
Saving 7.29 x 4.51 in image

Does generalization depend on outdegree?

# Does generalization depend on the subject's outdegree?
# Three-way interaction: prediction x novelty x outdegree.
m <- glmer(
  ingChoiceN ~ predicted.Z * novel * outDegree +
    (predicted.Z + novel | subID),
  data    = fullTest,
  family  = binomial,
  control = glmerControl(optimizer = "bobyqa",
                         optCtrl = list(maxfun = 100000)),
  nAGQ    = 1
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ predicted.Z * novel * outDegree + (predicted.Z +      novel | subID)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4832.9   4919.9  -2402.5   4804.9     3676 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.3855 -1.0379  0.5867  0.8615  2.2460 

Random effects:
 Groups Name          Variance Std.Dev. Corr       
 subID  (Intercept)   0.29679  0.54479             
        predicted.Z   0.01238  0.11127  -0.91      
        novelHeld Out 0.00446  0.06678   0.09  0.33
Number of obs: 3690, groups:  subID, 29

Fixed effects:
                                     Estimate Std. Error z value Pr(>|z|)  
(Intercept)                          0.243639   0.138851   1.755   0.0793 .
predicted.Z                          0.291423   0.134287   2.170   0.0300 *
novelHeld Out                       -0.148255   0.142113  -1.043   0.2968  
outDegree                            0.003400   0.003735   0.910   0.3627  
predicted.Z:novelHeld Out           -0.331784   0.143766  -2.308   0.0210 *
predicted.Z:outDegree               -0.002504   0.003750  -0.668   0.5043  
novelHeld Out:outDegree              0.006894   0.005731   1.203   0.2290  
predicted.Z:novelHeld Out:outDegree  0.013065   0.005807   2.250   0.0245 *
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) prdc.Z nvlHlO outDgr pr.Z:HO pr.Z:D nvHO:D
predicted.Z -0.184                                           
novelHeldOt -0.442  0.064                                    
outDegree   -0.555  0.010  0.544                             
prdctd.Z:HO  0.058 -0.430  0.026 -0.056                      
prdctd.Z:tD  0.058 -0.577 -0.054 -0.125  0.538               
nvlHldOt:tD  0.364 -0.040 -0.847 -0.649 -0.041   0.082       
prdc.Z:HO:D -0.035  0.371 -0.038  0.080 -0.844  -0.646  0.040
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Odds-ratio table (exponentiated fixed effects with CIs)
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")
# Semi-partial R^2 per fixed effect (r2glmm)
r2beta(m)
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
                               Effect   Rsq upper.CL lower.CL
1                               Model 0.013    0.023    0.008
2                         predicted.Z 0.003    0.007    0.000
5           predicted.Z:novelHeld Out 0.002    0.005    0.000
8 predicted.Z:novelHeld Out:outDegree 0.001    0.005    0.000
7             novelHeld Out:outDegree 0.000    0.003    0.000
3                       novelHeld Out 0.000    0.002    0.000
4                           outDegree 0.000    0.002    0.000
6               predicted.Z:outDegree 0.000    0.002    0.000
ggpredict(m, c("predicted.Z", "outDegree" ,"novel")) %>% plot(show.title=F)+ xlab("Cross-Validated Self-Descriptiveness") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()
Data were 'prettified'. Consider using `terms="predicted.Z [all]"` to get smooth plots.
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/CVpredictionGeneralize.tiff",dpi=600)
Saving 7.29 x 4.51 in image

Does generalization depend on indegree?

No — indegree does not moderate generalization: none of the inDegree terms or their interactions reach significance.

summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ predicted.Z * novel * inDegree + (predicted.Z +      novel | subID)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4842.6   4929.6  -2407.3   4814.6     3676 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.3708 -1.0425  0.6081  0.8478  2.1489 

Random effects:
 Groups Name          Variance Std.Dev. Corr       
 subID  (Intercept)   0.297726 0.54564             
        predicted.Z   0.014331 0.11971  -0.94      
        novelHeld Out 0.004817 0.06941   0.06  0.29
Number of obs: 3690, groups:  subID, 29

Fixed effects:
                                     Estimate Std. Error z value Pr(>|z|)  
(Intercept)                         0.3072752  0.1489679   2.063   0.0391 *
predicted.Z                         0.2147101  0.1427624   1.504   0.1326  
novelHeld Out                      -0.0106251  0.1574787  -0.067   0.9462  
inDegree                            0.0001678  0.0045112   0.037   0.9703  
predicted.Z:novelHeld Out          -0.1367979  0.1585589  -0.863   0.3883  
predicted.Z:inDegree                0.0019902  0.0043923   0.453   0.6505  
novelHeld Out:inDegree              0.0005728  0.0065070   0.088   0.9298  
predicted.Z:novelHeld Out:inDegree  0.0040134  0.0066092   0.607   0.5437  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) prdc.Z nvlHlO inDegr pr.Z:HO pr.Z:D nvHO:D
predicted.Z -0.172                                           
novelHeldOt -0.495  0.045                                    
inDegree    -0.626 -0.007  0.598                             
prdctd.Z:HO  0.043 -0.485  0.019 -0.033                      
prdctd.Z:nD  0.041 -0.652 -0.035 -0.069  0.585               
nvlHldOt:nD  0.437 -0.020 -0.878 -0.693 -0.027   0.048       
prdc.Z:HO:D -0.024  0.429 -0.025  0.046 -0.873  -0.666  0.020

Neighboring Dependencies Predicting Choices

Generalization of Outdegree Neighboring Self-Evaluations

summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ neighAveOutSE.Z * novel + desirability + propCorr +  
    (neighAveOutSE.Z + novel | subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4419.5   4500.0  -2196.7   4393.5     3601 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.1997 -0.8726  0.4282  0.7774  4.3536 

Random effects:
 Groups Name            Variance  Std.Dev.  Corr     
 trait  (Intercept)     8.505e-15 9.222e-08          
 subID  (Intercept)     3.667e-01 6.056e-01          
        neighAveOutSE.Z 3.430e-02 1.852e-01 0.44     
        novelHeld Out   2.598e-02 1.612e-01 0.13 0.95
Number of obs: 3614, groups:  trait, 147; subID, 25

Fixed effects:
                              Estimate Std. Error z value Pr(>|z|)    
(Intercept)                   -2.05530    0.51479  -3.992 6.54e-05 ***
neighAveOutSE.Z                0.23453    0.07600   3.086  0.00203 ** 
novelHeld Out                  0.02107    0.08187   0.257  0.79692    
desirability                  -0.01230    0.08333  -0.148  0.88266    
propCorr                       4.79310    0.26952  17.784  < 2e-16 ***
neighAveOutSE.Z:novelHeld Out  0.03691    0.08016   0.460  0.64520    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) nAOSE. nvlHlO dsrblt prpCrr
nghAvOtSE.Z  0.105                            
novelHeldOt -0.057  0.167                     
desirabilty -0.931 -0.071  0.015              
propCorr    -0.227  0.045  0.006 -0.035       
ngAOSE.Z:HO -0.003 -0.375  0.013  0.007  0.032
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')

Does entropy (i.e., uncertainty) predict likelihood of ingroup choices?

Does a linear trend of similarity-based probabilities predict ingroup choices?

Does a linear trend of similarity-based probabilities predict ingroup choices, controlling for self-descriptiveness?

# Linear trend of similarity-based probabilities, controlling for
# self-descriptiveness; random slopes for both by subject.
m <- glmer(
  ingChoiceN ~ scale(slope) + selfResp.Z +
    (scale(slope) + selfResp.Z | subID) + (1 | trait),
  data    = fullTest,
  family  = binomial,
  control = glmerControl(optimizer = "bobyqa",
                         optCtrl = list(maxfun = 100000)),
  nAGQ    = 1
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ scale(slope) + selfResp.Z + (scale(slope) + selfResp.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  3324.8   3383.3  -1652.4   3304.8     2546 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-3.0960 -1.0011  0.5507  0.8387  2.3694 

Random effects:
 Groups Name         Variance Std.Dev. Corr       
 trait  (Intercept)  0.00000  0.0000              
 subID  (Intercept)  0.38198  0.6180              
        scale(slope) 0.04406  0.2099   -0.99      
        selfResp.Z   0.13234  0.3638    0.55 -0.66
Number of obs: 2556, groups:  trait, 148; subID, 29

Fixed effects:
             Estimate Std. Error z value Pr(>|z|)  
(Intercept)   0.27038    0.12721   2.126   0.0335 *
scale(slope)  0.21730    0.11988   1.813   0.0699 .
selfResp.Z    0.19319    0.08783   2.200   0.0278 *
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(s)
scale(slop) -0.406       
selfResp.Z   0.403 -0.454
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")

Does a non-parametric trend of similarity-based probabilities predict ingroup choices, controlling for self-descriptiveness?

# Non-parametric trend of similarity-based probabilities, controlling
# for self-descriptiveness.
m <- glmer(
  ingChoiceN ~ scale(nlslope) + selfResp.Z +
    (scale(nlslope) + selfResp.Z | subID) + (1 | trait),
  data    = fullTest,
  family  = binomial,
  control = glmerControl(optimizer = "bobyqa",
                         optCtrl = list(maxfun = 100000)),
  nAGQ    = 1
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ scale(nlslope) + selfResp.Z + (scale(nlslope) +      selfResp.Z | subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  3327.3   3385.7  -1653.6   3307.3     2546 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-3.0256 -0.9895  0.5363  0.8481  2.3016 

Random effects:
 Groups Name           Variance Std.Dev. Corr       
 trait  (Intercept)    0.0000   0.0000              
 subID  (Intercept)    0.4056   0.6369              
        scale(nlslope) 0.2145   0.4632   -0.24      
        selfResp.Z     0.1390   0.3728    0.62 -0.44
Number of obs: 2556, groups:  trait, 148; subID, 29

Fixed effects:
               Estimate Std. Error z value Pr(>|z|)  
(Intercept)     0.35811    0.14766   2.425   0.0153 *
scale(nlslope)  0.17096    0.14329   1.193   0.2328  
selfResp.Z      0.20564    0.08781   2.342   0.0192 *
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(n)
scal(nlslp) -0.073       
selfResp.Z   0.393 -0.389
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")

Does a linear trend of similarity-based probabilities predict ingroup choices?

# Linear trend vs. cross-validated prediction as competing predictors.
m <- glmer(
  ingChoiceN ~ slope.Z + predicted.Z +
    (slope.Z + predicted.Z | subID) + (1 | trait),
  data    = fullTest,
  family  = binomial,
  control = glmerControl(optimizer = "bobyqa",
                         optCtrl = list(maxfun = 100000)),
  nAGQ    = 1
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ slope.Z + predicted.Z + (slope.Z + predicted.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4653.3   4715.5  -2316.7   4633.3     3680 

Scaled residuals: 
   Min     1Q Median     3Q    Max 
-6.507 -1.001  0.472  0.811  2.610 

Random effects:
 Groups Name        Variance Std.Dev. Corr       
 trait  (Intercept)  0.0000  0.0000              
 subID  (Intercept)  0.4779  0.6913              
        slope.Z     20.1334  4.4870   -0.59      
        predicted.Z 21.3954  4.6255    0.57 -1.00
Number of obs: 3690, groups:  trait, 148; subID, 29

Fixed effects:
            Estimate Std. Error z value Pr(>|z|)  
(Intercept)   0.2051     0.1384   1.481   0.1385  
slope.Z       1.5056     0.9093   1.656   0.0978 .
predicted.Z  -1.3237     0.9392  -1.409   0.1587  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) slop.Z
slope.Z     -0.573       
predicted.Z  0.537 -0.994
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")

Backwards solution: Can you predict self-evaluations from similarity to ingroup and outgroup choices?

summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ eSE + (eSE | subID)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4785.1   4816.1  -2387.5   4775.1     3635 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.6814 -1.0139  0.5896  0.8615  2.8232 

Random effects:
 Groups Name        Variance Std.Dev. Corr 
 subID  (Intercept) 24.66    4.965         
        eSE         16.76    4.094    -1.00
Number of obs: 3640, groups:  subID, 25

Fixed effects:
            Estimate Std. Error z value Pr(>|z|)
(Intercept)  -0.4974     1.1746  -0.424    0.672
eSE           0.6004     0.9522   0.631    0.528

Correlation of Fixed Effects:
    (Intr)
eSE -0.996
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: ingChoiceN ~ sSE * novel + (sSE | subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4791.8   4841.4  -2387.9   4775.8     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.6486 -1.0125  0.5929  0.8627  2.7918 

Random effects:
 Groups Name        Variance Std.Dev. Corr 
 trait  (Intercept)  0.000   0.000         
 subID  (Intercept)  1.134   1.065         
        sSE         26.117   5.111    -0.90
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
            Estimate Std. Error z value Pr(>|z|)
(Intercept)  0.03974    0.29415   0.135    0.893
sSE          0.89300    1.28620   0.694    0.487
novel1       0.12039    0.32510   0.370    0.711
sSE:novel1  -0.36572    1.26144  -0.290    0.772

Correlation of Fixed Effects:
           (Intr) sSE    novel1
sSE        -0.928              
novel1     -0.427  0.366       
sSE:novel1  0.413 -0.372 -0.976
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ scale(oSE) + (scale(oSE) | subID) + (1 |      trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  5482.7   5520.8  -2735.3   5470.7     4215 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.6879 -0.9939  0.5179  0.8500  2.8947 

Random effects:
 Groups Name        Variance Std.Dev. Corr
 trait  (Intercept) 0.0000   0.0000       
 subID  (Intercept) 0.3871   0.6222       
        scale(oSE)  0.1068   0.3267   0.59
Number of obs: 4221, groups:  trait, 148; subID, 29

Fixed effects:
            Estimate Std. Error z value Pr(>|z|)  
(Intercept)  0.30322    0.12022   2.522   0.0117 *
scale(oSE)   0.08435    0.06924   1.218   0.2232  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
           (Intr)
scale(oSE) 0.504 
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Moderation by self-esteem (RSE): prediction x standardized RSE.
m <- glmer(
  as.factor(ingChoiceN) ~ predicted.Z * scale(RSE) +
    (predicted.Z | subID) + (1 | trait),
  data    = fullTest,
  family  = binomial,
  control = glmerControl(optimizer = "bobyqa",
                         optCtrl = list(maxfun = 100000)),
  nAGQ    = 1
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ predicted.Z * scale(RSE) + (predicted.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4449.7   4498.6  -2216.8   4433.7     3364 

Scaled residuals: 
   Min     1Q Median     3Q    Max 
-2.284 -1.052  0.623  0.872  2.044 

Random effects:
 Groups Name        Variance  Std.Dev.  Corr 
 trait  (Intercept) 2.669e-16 1.634e-08      
 subID  (Intercept) 2.409e-01 4.908e-01      
        predicted.Z 7.502e-05 8.662e-03 -1.00
Number of obs: 3372, groups:  trait, 148; subID, 25

Fixed effects:
                       Estimate Std. Error z value Pr(>|z|)   
(Intercept)             0.24110    0.11447   2.106  0.03518 * 
predicted.Z             0.34727    0.12546   2.768  0.00564 **
scale(RSE)             -0.18688    0.12281  -1.522  0.12807   
predicted.Z:scale(RSE)  0.11928    0.09511   1.254  0.20978   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) prdc.Z s(RSE)
predicted.Z -0.043              
scale(RSE)   0.012 -0.521       
prd.Z:(RSE) -0.373  0.212 -0.159
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("predicted.Z", "RSE")) %>% plot()
Data were 'prettified'. Consider using `terms="predicted.Z [all]"` to get smooth plots.

# Moderation by self-concept clarity (SCC).
m <- glmer(
  as.factor(ingChoiceN) ~ predicted.Z * scale(SCC) +
    (predicted.Z | subID) + (1 | trait),
  data    = fullTest,
  family  = binomial,
  control = glmerControl(optimizer = "bobyqa",
                         optCtrl = list(maxfun = 100000)),
  nAGQ    = 1
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ predicted.Z * scale(SCC) + (predicted.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4452.1   4501.1  -2218.1   4436.1     3364 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.2296 -1.0482  0.6315  0.8635  2.1392 

Random effects:
 Groups Name        Variance  Std.Dev.  Corr 
 trait  (Intercept) 2.572e-14 1.604e-07      
 subID  (Intercept) 2.608e-01 5.107e-01      
        predicted.Z 1.120e-02 1.058e-01 -1.00
Number of obs: 3372, groups:  trait, 148; subID, 25

Fixed effects:
                        Estimate Std. Error z value Pr(>|z|)  
(Intercept)             0.297517   0.118444   2.512   0.0120 *
predicted.Z             0.279553   0.115650   2.417   0.0156 *
scale(SCC)             -0.008679   0.119455  -0.073   0.9421  
predicted.Z:scale(SCC) -0.046727   0.120280  -0.388   0.6977  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) prdc.Z s(SCC)
predicted.Z -0.267              
scale(SCC)   0.123 -0.318       
prd.Z:(SCC) -0.344 -0.080 -0.369
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("predicted.Z", "SCC")) %>% plot()
Data were 'prettified'. Consider using `terms="predicted.Z [all]"` to get smooth plots.

# Moderation by DS.
m <- glmer(
  as.factor(ingChoiceN) ~ predicted.Z * scale(DS) +
    (predicted.Z | subID) + (1 | trait),
  data    = fullTest,
  family  = binomial,
  control = glmerControl(optimizer = "bobyqa",
                         optCtrl = list(maxfun = 100000)),
  nAGQ    = 1
)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ predicted.Z * scale(DS) + (predicted.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4452.3   4501.3  -2218.2   4436.3     3364 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.2274 -1.0484  0.6330  0.8691  2.1269 

Random effects:
 Groups Name        Variance Std.Dev. Corr 
 trait  (Intercept) 0.000000 0.00000       
 subID  (Intercept) 0.259573 0.50948       
        predicted.Z 0.009853 0.09926  -1.00
Number of obs: 3372, groups:  trait, 148; subID, 25

Fixed effects:
                       Estimate Std. Error z value Pr(>|z|)  
(Intercept)            0.285057   0.116450   2.448   0.0144 *
predicted.Z            0.258527   0.115217   2.244   0.0248 *
scale(DS)             -0.020453   0.130288  -0.157   0.8753  
predicted.Z:scale(DS)  0.009681   0.117406   0.082   0.9343  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) prdc.Z sc(DS)
predicted.Z -0.322              
scale(DS)   -0.171  0.374       
prdc.Z:(DS)  0.313 -0.161 -0.584
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Simple-slopes plot: predicted probability across predicted.Z at levels of DS.
ggpredict(m, c("predicted.Z", "DS")) %>% plot()
Data were 'prettified'. Consider using `terms="predicted.Z [all]"` to get smooth plots.

# Moderator GLMM with NFC. The predicted.Z x NFC interaction is significant in
# the output below (p = .043).
# NOTE(review): singular fit (trait variance 0; subID corr = -1.00); also note
# this is one of many untested-moderator fits -- consider a multiple-comparison
# correction across the moderator set.
m <- glmer( as.factor(ingChoiceN) ~ predicted.Z*scale(NFC) + ( predicted.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ predicted.Z * scale(NFC) + (predicted.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4448.3   4497.3  -2216.1   4432.3     3364 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.2304 -1.0520  0.6456  0.8539  2.2222 

Random effects:
 Groups Name        Variance Std.Dev. Corr 
 trait  (Intercept) 0.000000 0.00000       
 subID  (Intercept) 0.233963 0.48370       
        predicted.Z 0.008746 0.09352  -1.00
Number of obs: 3372, groups:  trait, 148; subID, 25

Fixed effects:
                       Estimate Std. Error z value Pr(>|z|)   
(Intercept)             0.31018    0.10638   2.916  0.00355 **
predicted.Z             0.31111    0.11148   2.791  0.00526 **
scale(NFC)             -0.08793    0.10823  -0.812  0.41655   
predicted.Z:scale(NFC)  0.26711    0.13185   2.026  0.04278 * 
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) prdc.Z s(NFC)
predicted.Z -0.266              
scale(NFC)  -0.041  0.035       
prd.Z:(NFC)  0.124  0.251 -0.408
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Simple-slopes plot: predicted probability across predicted.Z at levels of NFC.
ggpredict(m, c("predicted.Z", "NFC")) %>% plot()
Data were 'prettified'. Consider using `terms="predicted.Z [all]"` to get smooth plots.

# Moderator GLMM with SING.Ind; both the main effect of SING.Ind and its
# interaction with predicted.Z reach p < .05 in the output below.
# NOTE(review): singular fit (trait variance ~0; subID intercept/slope
# corr = +1.00) -- results should be checked with a simplified random structure.
m <- glmer( as.factor(ingChoiceN) ~ predicted.Z*scale(SING.Ind) + ( predicted.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ predicted.Z * scale(SING.Ind) + (predicted.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4445.3   4494.3  -2214.7   4429.3     3364 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.3660 -1.0464  0.6193  0.8716  1.9869 

Random effects:
 Groups Name        Variance  Std.Dev.  Corr
 trait  (Intercept) 1.696e-15 4.118e-08     
 subID  (Intercept) 2.129e-01 4.614e-01     
        predicted.Z 6.052e-03 7.779e-02 1.00
Number of obs: 3372, groups:  trait, 148; subID, 25

Fixed effects:
                            Estimate Std. Error z value Pr(>|z|)   
(Intercept)                  0.22506    0.11183   2.013  0.04416 * 
predicted.Z                  0.49350    0.15106   3.267  0.00109 **
scale(SING.Ind)             -0.41608    0.15616  -2.664  0.00771 **
predicted.Z:scale(SING.Ind)  0.12930    0.06301   2.052  0.04015 * 
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) prdc.Z s(SING
predicted.Z  0.234              
scl(SING.I) -0.124 -0.744       
p.Z:(SING.I -0.391  0.062  0.074
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Simple-slopes plot: predicted probability across predicted.Z at levels of SING.Ind.
ggpredict(m, c("predicted.Z", "SING.Ind")) %>% plot()
Data were 'prettified'. Consider using `terms="predicted.Z [all]"` to get smooth plots.

# Moderator GLMM with SING.Inter (no significant moderation in the output below).
# NOTE(review): singular fit (trait variance ~0; subID corr = +1.00).
m <- glmer( as.factor(ingChoiceN) ~ predicted.Z*scale(SING.Inter) + ( predicted.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ predicted.Z * scale(SING.Inter) + (predicted.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4450.2   4499.2  -2217.1   4434.2     3364 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.3195 -1.0424  0.6272  0.8741  2.0284 

Random effects:
 Groups Name        Variance  Std.Dev.  Corr
 trait  (Intercept) 6.097e-15 7.808e-08     
 subID  (Intercept) 2.510e-01 5.010e-01     
        predicted.Z 1.353e-03 3.678e-02 1.00
Number of obs: 3372, groups:  trait, 148; subID, 25

Fixed effects:
                              Estimate Std. Error z value Pr(>|z|)   
(Intercept)                     0.3212     0.1107   2.900  0.00373 **
predicted.Z                     0.2784     0.1237   2.251  0.02437 * 
scale(SING.Inter)               0.1673     0.1113   1.504  0.13262   
predicted.Z:scale(SING.Inter)  -0.1145     0.0868  -1.319  0.18708   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) prdc.Z s(SING
predicted.Z  0.199              
scl(SING.I)  0.055 -0.040       
p.Z:(SING.I -0.205 -0.382 -0.152
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Simple-slopes plot: predicted probability across predicted.Z at levels of SING.Inter.
ggpredict(m, c("predicted.Z", "SING.Inter")) %>% plot()
Data were 'prettified'. Consider using `terms="predicted.Z [all]"` to get smooth plots.

# Moderator GLMM with Proto (no significant moderation in the output below).
# NOTE(review): singular fit (trait variance 0; subID corr = -1.00).
m <- glmer( as.factor(ingChoiceN) ~ predicted.Z*scale(Proto) + ( predicted.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ predicted.Z * scale(Proto) + (predicted.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4451.6   4500.6  -2217.8   4435.6     3364 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.2262 -1.0496  0.6326  0.8715  2.1077 

Random effects:
 Groups Name        Variance Std.Dev. Corr 
 trait  (Intercept) 0.000000 0.00000       
 subID  (Intercept) 0.246752 0.49674       
        predicted.Z 0.005939 0.07706  -1.00
Number of obs: 3372, groups:  trait, 148; subID, 25

Fixed effects:
                         Estimate Std. Error z value Pr(>|z|)  
(Intercept)               0.26086    0.11026   2.366   0.0180 *
predicted.Z               0.25533    0.11199   2.280   0.0226 *
scale(Proto)             -0.02969    0.12083  -0.246   0.8059  
predicted.Z:scale(Proto)  0.06533    0.08536   0.765   0.4441  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) prdc.Z scl(P)
predicted.Z -0.196              
scale(Prot)  0.093 -0.338       
prdct.Z:(P) -0.224 -0.006 -0.472
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Simple-slopes plot: predicted probability across predicted.Z at levels of Proto.
ggpredict(m, c("predicted.Z", "Proto")) %>% plot()
Data were 'prettified'. Consider using `terms="predicted.Z [all]"` to get smooth plots.

# Moderator GLMM with SI (no significant moderation in the output below).
# NOTE(review): singular fit (trait variance ~0; subID corr = -1.00).
m <- glmer( as.factor(ingChoiceN) ~ predicted.Z*scale(SI) + ( predicted.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ predicted.Z * scale(SI) + (predicted.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4451.5   4500.4  -2217.7   4435.5     3364 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.1926 -1.0490  0.6295  0.8731  2.1288 

Random effects:
 Groups Name        Variance  Std.Dev.  Corr 
 trait  (Intercept) 1.893e-14 1.376e-07      
 subID  (Intercept) 2.480e-01 4.980e-01      
        predicted.Z 1.495e-02 1.223e-01 -1.00
Number of obs: 3372, groups:  trait, 148; subID, 25

Fixed effects:
                      Estimate Std. Error z value Pr(>|z|)  
(Intercept)            0.27631    0.10918   2.531   0.0114 *
predicted.Z            0.22812    0.11042   2.066   0.0388 *
scale(SI)              0.08172    0.11025   0.741   0.4585  
predicted.Z:scale(SI)  0.02415    0.08217   0.294   0.7689  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) prdc.Z sc(SI)
predicted.Z -0.293              
scale(SI)    0.052 -0.128       
prdc.Z:(SI) -0.101 -0.344 -0.393
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Simple-slopes plot: predicted probability across predicted.Z at levels of SI.
ggpredict(m, c("predicted.Z", "SI")) %>% plot()
Data were 'prettified'. Consider using `terms="predicted.Z [all]"` to get smooth plots.

# Moderator GLMM with NTB.
# NOTE(review): unlike every other model in this series, this one specifies a
# random predicted.Z slope by trait -- (predicted.Z | trait) instead of
# (1 | trait). The output below shows that slope variance is ~0, so the extra
# term only deepens the singularity; confirm this difference is intentional,
# otherwise use (1 | trait) for consistency with the sibling models.
m <- glmer( as.factor(ingChoiceN) ~ predicted.Z*scale(NTB) + ( predicted.Z | subID) + ( predicted.Z | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ predicted.Z * scale(NTB) + (predicted.Z |      subID) + (predicted.Z | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4450.7   4512.0  -2215.4   4430.7     3362 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.2315 -1.0498  0.6251  0.8688  2.0892 

Random effects:
 Groups Name        Variance  Std.Dev.  Corr 
 trait  (Intercept) 1.100e-14 1.049e-07      
        predicted.Z 2.054e-13 4.532e-07 1.00 
 subID  (Intercept) 2.041e-01 4.518e-01      
        predicted.Z 1.935e-03 4.399e-02 -1.00
Number of obs: 3372, groups:  trait, 148; subID, 25

Fixed effects:
                       Estimate Std. Error z value Pr(>|z|)   
(Intercept)             0.28922    0.09881   2.927  0.00342 **
predicted.Z             0.26267    0.10390   2.528  0.01146 * 
scale(NTB)              0.23726    0.10150   2.338  0.01941 * 
predicted.Z:scale(NTB) -0.05590    0.09787  -0.571  0.56790   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) prdc.Z s(NTB)
predicted.Z -0.124              
scale(NTB)   0.018 -0.049       
prd.Z:(NTB)  0.032 -0.353  0.177
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Simple-slopes plot: predicted probability across predicted.Z at levels of NTB.
ggpredict(m, c("predicted.Z", "NTB")) %>% plot()
Data were 'prettified'. Consider using `terms="predicted.Z [all]"` to get smooth plots.

# NOTE(review): this predicted.Z x RSE model is fit but its results are never
# reported -- `m` is overwritten by the entropy.Z x RSE model immediately below
# before any summary(m)/ggpredict call. Either add summary(m) here or delete
# this fit if RSE moderation of predicted.Z is no longer of interest.
m <- glmer( as.factor(ingChoiceN) ~ predicted.Z*scale(RSE) + ( predicted.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
# entropy.Z series: does RSE moderate the effect of trait-level entropy on
# ingroup choice? Same GLMM structure with entropy.Z as the focal predictor.
# NOTE(review): singular fit (trait intercept variance ~0 in the output below);
# consider dropping (1 | trait).
m <- glmer( as.factor(ingChoiceN) ~ entropy.Z*scale(RSE) + ( entropy.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ entropy.Z * scale(RSE) + (entropy.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4793.6   4843.2  -2388.8   4777.6     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.8434 -1.0083  0.5747  0.8744  2.3956 

Random effects:
 Groups Name        Variance  Std.Dev.  Corr 
 trait  (Intercept) 1.004e-18 1.002e-09      
 subID  (Intercept) 9.301e-01 9.644e-01      
        entropy.Z   1.594e+00 1.262e+00 -0.55
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                     Estimate Std. Error z value Pr(>|z|)   
(Intercept)            0.7127     0.2436   2.925  0.00344 **
entropy.Z             -0.5494     0.3071  -1.789  0.07362 . 
scale(RSE)            -0.2484     0.2335  -1.064  0.28751   
entropy.Z:scale(RSE)   0.3395     0.3118   1.089  0.27626   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) entr.Z s(RSE)
entropy.Z   -0.412              
scale(RSE)  -0.019  0.206       
ent.Z:(RSE)  0.218 -0.047 -0.388
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Simple-slopes plot: predicted probability across entropy.Z at levels of RSE.
ggpredict(m, c("entropy.Z", "RSE")) %>% plot()
Data were 'prettified'. Consider using `terms="entropy.Z [all]"` to get smooth plots.

# entropy.Z x SCC moderator GLMM (no significant moderation in the output below).
# NOTE(review): singular fit -- trait intercept variance ~0; consider (1 | trait) removal.
m <- glmer( as.factor(ingChoiceN) ~ entropy.Z*scale(SCC) + ( entropy.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ entropy.Z * scale(SCC) + (entropy.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4795.1   4844.7  -2389.6   4779.1     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.8462 -1.0069  0.5800  0.8735  2.3915 

Random effects:
 Groups Name        Variance  Std.Dev.  Corr 
 trait  (Intercept) 6.577e-20 2.564e-10      
 subID  (Intercept) 9.521e-01 9.757e-01      
        entropy.Z   1.680e+00 1.296e+00 -0.56
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                      Estimate Std. Error z value Pr(>|z|)   
(Intercept)           0.690663   0.243870   2.832  0.00462 **
entropy.Z            -0.527405   0.314479  -1.677  0.09353 . 
scale(SCC)           -0.097378   0.240594  -0.405  0.68567   
entropy.Z:scale(SCC)  0.005057   0.299650   0.017  0.98653   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) entr.Z s(SCC)
entropy.Z   -0.436              
scale(SCC)  -0.071  0.122       
ent.Z:(SCC)  0.118  0.025 -0.481
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Simple-slopes plot: predicted probability across entropy.Z at levels of SCC.
ggpredict(m, c("entropy.Z", "SCC")) %>% plot()
Data were 'prettified'. Consider using `terms="entropy.Z [all]"` to get smooth plots.

# entropy.Z x DS moderator GLMM (no significant moderation in the output below).
# NOTE(review): singular fit -- trait intercept variance 0.
m <- glmer( as.factor(ingChoiceN) ~ entropy.Z*scale(DS) + ( entropy.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ entropy.Z * scale(DS) + (entropy.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4793.8   4843.4  -2388.9   4777.8     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.8406 -1.0079  0.5813  0.8733  2.4261 

Random effects:
 Groups Name        Variance Std.Dev. Corr 
 trait  (Intercept) 0.0000   0.000         
 subID  (Intercept) 0.8798   0.938         
        entropy.Z   1.6204   1.273    -0.55
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                    Estimate Std. Error z value Pr(>|z|)   
(Intercept)           0.7324     0.2379   3.078  0.00208 **
entropy.Z            -0.5417     0.3101  -1.747  0.08070 . 
scale(DS)             0.2496     0.2290   1.090  0.27583   
entropy.Z:scale(DS)  -0.3141     0.3095  -1.015  0.31015   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) entr.Z sc(DS)
entropy.Z   -0.431              
scale(DS)    0.144 -0.206       
entr.Z:(DS) -0.214  0.020 -0.448
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Simple-slopes plot: predicted probability across entropy.Z at levels of DS.
ggpredict(m, c("entropy.Z", "DS")) %>% plot()
Data were 'prettified'. Consider using `terms="entropy.Z [all]"` to get smooth plots.

# entropy.Z x NFC moderator GLMM; the interaction is significant in the output
# below (p = .037).
# NOTE(review): singular fit (trait intercept variance 0); as with the other
# moderator fits, an uncorrected p just under .05 across many tested moderators
# warrants caution.
m <- glmer( as.factor(ingChoiceN) ~ entropy.Z*scale(NFC) + ( entropy.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ entropy.Z * scale(NFC) + (entropy.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4791.1   4840.7  -2387.6   4775.1     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.8458 -1.0084  0.5964  0.8739  2.4604 

Random effects:
 Groups Name        Variance Std.Dev. Corr 
 trait  (Intercept) 0.0000   0.0000        
 subID  (Intercept) 0.7927   0.8903        
        entropy.Z   1.2373   1.1124   -0.55
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                     Estimate Std. Error z value Pr(>|z|)   
(Intercept)            0.7115     0.2278   3.123  0.00179 **
entropy.Z             -0.5087     0.2797  -1.819  0.06897 . 
scale(NFC)             0.2597     0.2271   1.144  0.25278   
entropy.Z:scale(NFC)  -0.5659     0.2716  -2.083  0.03722 * 
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) entr.Z s(NFC)
entropy.Z   -0.442              
scale(NFC)   0.112 -0.175       
ent.Z:(NFC) -0.190  0.048 -0.483
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Simple-slopes plot: predicted probability across entropy.Z at levels of NFC.
ggpredict(m, c("entropy.Z", "NFC")) %>% plot()
Data were 'prettified'. Consider using `terms="entropy.Z [all]"` to get smooth plots.

# entropy.Z x SING.Ind moderator GLMM; main effects of entropy.Z and SING.Ind
# reach p < .05 in the output below, interaction does not.
# NOTE(review): singular fit (trait intercept variance 0).
m <- glmer( as.factor(ingChoiceN) ~ entropy.Z*scale(SING.Ind) + ( entropy.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ entropy.Z * scale(SING.Ind) + (entropy.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4791.7   4841.3  -2387.9   4775.7     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.8319 -1.0092  0.5749  0.8759  2.4004 

Random effects:
 Groups Name        Variance Std.Dev. Corr 
 trait  (Intercept) 0.0000   0.0000        
 subID  (Intercept) 0.6746   0.8214        
        entropy.Z   1.4260   1.1941   -0.65
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                          Estimate Std. Error z value Pr(>|z|)   
(Intercept)                 0.6476     0.2177   2.974  0.00294 **
entropy.Z                  -0.6070     0.2932  -2.071  0.03840 * 
scale(SING.Ind)            -0.5182     0.2563  -2.022  0.04318 * 
entropy.Z:scale(SING.Ind)   0.1566     0.2875   0.545  0.58605   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) entr.Z s(SING
entropy.Z   -0.467              
scl(SING.I) -0.005  0.244       
e.Z:(SING.I  0.228 -0.066 -0.327
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Simple-slopes plot: predicted probability across entropy.Z at levels of SING.Ind.
ggpredict(m, c("entropy.Z", "SING.Ind")) %>% plot()
Data were 'prettified'. Consider using `terms="entropy.Z [all]"` to get smooth plots.

# entropy.Z x SING.Inter moderator GLMM; SING.Inter main effect p = .042 in the
# output below, interaction n.s.
# NOTE(review): singular fit (trait intercept variance 0).
m <- glmer( as.factor(ingChoiceN) ~ entropy.Z*scale(SING.Inter) + ( entropy.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ entropy.Z * scale(SING.Inter) + (entropy.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4791.4   4841.0  -2387.7   4775.4     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.8785 -1.0138  0.5936  0.8820  2.4011 

Random effects:
 Groups Name        Variance Std.Dev. Corr 
 trait  (Intercept) 0.0000   0.0000        
 subID  (Intercept) 0.6094   0.7806        
        entropy.Z   1.7311   1.3157   -0.51
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                            Estimate Std. Error z value Pr(>|z|)    
(Intercept)                   0.7522     0.2142   3.512 0.000444 ***
entropy.Z                    -0.4557     0.3140  -1.451 0.146682    
scale(SING.Inter)             0.5286     0.2593   2.038 0.041513 *  
entropy.Z:scale(SING.Inter)  -0.1277     0.2946  -0.433 0.664789    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) entr.Z s(SING
entropy.Z   -0.382              
scl(SING.I)  0.237 -0.005       
e.Z:(SING.I -0.056  0.040 -0.301
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Simple-slopes plot: predicted probability across entropy.Z at levels of SING.Inter.
ggpredict(m, c("entropy.Z", "SING.Inter")) %>% plot()
Data were 'prettified'. Consider using `terms="entropy.Z [all]"` to get smooth plots.

# entropy.Z x Proto moderator GLMM (no significant moderation in the output below).
# NOTE(review): singular fit (trait intercept variance 0).
m <- glmer( as.factor(ingChoiceN) ~ entropy.Z*scale(Proto) + ( entropy.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ entropy.Z * scale(Proto) + (entropy.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4795.2   4844.8  -2389.6   4779.2     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.8447 -1.0052  0.5806  0.8732  2.3944 

Random effects:
 Groups Name        Variance Std.Dev. Corr 
 trait  (Intercept) 0.0000   0.0000        
 subID  (Intercept) 0.9035   0.9505        
        entropy.Z   1.6478   1.2837   -0.54
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                       Estimate Std. Error z value Pr(>|z|)   
(Intercept)             0.69010    0.23886   2.889  0.00386 **
entropy.Z              -0.50042    0.31002  -1.614  0.10650   
scale(Proto)            0.08185    0.23908   0.342  0.73209   
entropy.Z:scale(Proto) -0.09448    0.29866  -0.316  0.75173   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) entr.Z scl(P)
entropy.Z   -0.432              
scale(Prot)  0.057  0.041       
entrp.Z:(P)  0.011 -0.017 -0.281
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Simple-slopes plot: predicted probability across entropy.Z at levels of Proto.
ggpredict(m, c("entropy.Z", "Proto")) %>% plot()
Data were 'prettified'. Consider using `terms="entropy.Z [all]"` to get smooth plots.

# entropy.Z x SI moderator GLMM; SI main effect p = .038 in the output below,
# interaction n.s.
# NOTE(review): singular fit (trait intercept variance 0).
m <- glmer( as.factor(ingChoiceN) ~ entropy.Z*scale(SI) + ( entropy.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ entropy.Z * scale(SI) + (entropy.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4790.2   4839.8  -2387.1   4774.2     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.8302 -1.0044  0.5814  0.8769  2.4447 

Random effects:
 Groups Name        Variance Std.Dev. Corr 
 trait  (Intercept) 0.0000   0.0000        
 subID  (Intercept) 0.6831   0.8265        
        entropy.Z   1.8112   1.3458   -0.64
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                    Estimate Std. Error z value Pr(>|z|)    
(Intercept)          0.74323    0.21772   3.414 0.000641 ***
entropy.Z           -0.47302    0.31535  -1.500 0.133618    
scale(SI)            0.45524    0.21902   2.079 0.037661 *  
entropy.Z:scale(SI) -0.01954    0.30810  -0.063 0.949443    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) entr.Z sc(SI)
entropy.Z   -0.487              
scale(SI)    0.165 -0.075       
entr.Z:(SI) -0.024  0.071 -0.503
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Simple-slopes plot: predicted probability across entropy.Z at levels of SI.
ggpredict(m, c("entropy.Z", "SI")) %>% plot()
Data were 'prettified'. Consider using `terms="entropy.Z [all]"` to get smooth plots.

# entropy.Z x NTB moderator GLMM.
# NOTE(review): as with the predicted.Z x NTB model, this fit alone uses a
# random entropy.Z slope by trait -- (entropy.Z | trait) instead of the
# (1 | trait) used by every sibling model; the output below shows that slope
# variance is ~0. Confirm the asymmetry is intentional.
m <- glmer( as.factor(ingChoiceN) ~ entropy.Z*scale(NTB) + ( entropy.Z | subID) + ( entropy.Z | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ entropy.Z * scale(NTB) + (entropy.Z |      subID) + (entropy.Z | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4798.1   4860.1  -2389.0   4778.1     3630 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.8355 -1.0062  0.5801  0.8745  2.3634 

Random effects:
 Groups Name        Variance  Std.Dev.  Corr 
 trait  (Intercept) 0.000e+00 0.000e+00      
        entropy.Z   5.571e-16 2.360e-08  NaN 
 subID  (Intercept) 8.986e-01 9.479e-01      
        entropy.Z   1.595e+00 1.263e+00 -0.57
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                     Estimate Std. Error z value Pr(>|z|)   
(Intercept)           0.68431    0.23774   2.878   0.0040 **
entropy.Z            -0.52197    0.30393  -1.717   0.0859 . 
scale(NTB)            0.24331    0.23185   1.049   0.2940   
entropy.Z:scale(NTB) -0.06185    0.29468  -0.210   0.8337   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) entr.Z s(NTB)
entropy.Z   -0.452              
scale(NTB)   0.040 -0.082       
ent.Z:(NTB) -0.066  0.031 -0.534
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Simple-slopes plot: predicted probability across entropy.Z at levels of NTB.
ggpredict(m, c("entropy.Z", "NTB")) %>% plot()
Data were 'prettified'. Consider using `terms="entropy.Z [all]"` to get smooth plots.

# slope.Z series: slope.Z x RSE moderator GLMM.
# NOTE(review): singular fit (trait intercept variance 0). The subject-level
# variances in the output below are very large (~9 on the logit scale, with
# fixed-effect SEs ~0.64), which may indicate quasi-separation or a scaling
# problem in slope.Z -- worth checking before interpreting these estimates.
m <- glmer( as.factor(ingChoiceN) ~ slope.Z*scale(RSE) + ( slope.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ slope.Z * scale(RSE) + (slope.Z | subID) +      (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4717.6   4767.2  -2350.8   4701.6     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.0778 -0.9845  0.4695  0.8520  2.6235 

Random effects:
 Groups Name        Variance Std.Dev. Corr
 trait  (Intercept) 0.000    0.000        
 subID  (Intercept) 8.780    2.963        
        slope.Z     8.935    2.989    0.33
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                   Estimate Std. Error z value Pr(>|z|)  
(Intercept)          0.1636     0.6400   0.256   0.7982  
slope.Z              1.2559     0.6528   1.924   0.0544 .
scale(RSE)          -1.1510     0.6472  -1.778   0.0753 .
slope.Z:scale(RSE)  -0.6067     0.6374  -0.952   0.3412  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) slop.Z s(RSE)
slope.Z      0.284              
scale(RSE)  -0.004 -0.062       
slp.Z:(RSE) -0.062 -0.004  0.261
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# Simple-slopes plot: predicted probability across slope.Z at levels of RSE.
ggpredict(m, c("slope.Z", "RSE")) %>% plot()
Data were 'prettified'. Consider using `terms="slope.Z [all]"` to get smooth plots.

# Same GLMM family as the preceding fit, swapping the moderator to z-scored SCC
# (NOTE(review): scale abbreviation not defined in view — confirm). Random slope.Z
# by subject, random intercept by trait; bobyqa, maxfun 1e5. Singular fit reported
# below (trait variance essentially zero).
m <- glmer( as.factor(ingChoiceN) ~ slope.Z*scale(SCC) + ( slope.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ slope.Z * scale(SCC) + (slope.Z | subID) +      (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4720.3   4769.9  -2352.2   4704.3     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.4081 -0.9850  0.4734  0.8486  2.6613 

Random effects:
 Groups Name        Variance  Std.Dev.  Corr
 trait  (Intercept) 3.451e-16 1.858e-08     
 subID  (Intercept) 1.003e+01 3.167e+00     
        slope.Z     9.640e+00 3.105e+00 0.39
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                   Estimate Std. Error z value Pr(>|z|)  
(Intercept)          0.1138     0.6771   0.168   0.8665  
slope.Z              1.2064     0.6740   1.790   0.0735 .
scale(SCC)          -0.3103     0.6658  -0.466   0.6412  
slope.Z:scale(SCC)   0.2471     0.6620   0.373   0.7089  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) slop.Z s(SCC)
slope.Z      0.342              
scale(SCC)   0.011 -0.042       
slp.Z:(SCC) -0.042  0.013  0.347
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("slope.Z", "SCC")) %>% plot()
Data were 'prettified'. Consider using `terms="slope.Z [all]"` to get smooth plots.

# GLMM: ingroup choice predicted by slope.Z interacted with z-scored DS
# (NOTE(review): abbreviation not defined in view — confirm). Same random-effects
# structure as the sibling fits: slope.Z slope by subject, intercept by trait.
# Singular fit reported in the output below.
m <- glmer( as.factor(ingChoiceN) ~ slope.Z*scale(DS) + ( slope.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ slope.Z * scale(DS) + (slope.Z | subID) +      (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4719.6   4769.2  -2351.8   4703.6     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.2079 -0.9852  0.4703  0.8481  2.6378 

Random effects:
 Groups Name        Variance Std.Dev. Corr
 trait  (Intercept) 0.000    0.000        
 subID  (Intercept) 9.636    3.104        
        slope.Z     9.323    3.053    0.34
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                  Estimate Std. Error z value Pr(>|z|)  
(Intercept)         0.1673     0.6667   0.251   0.8019  
slope.Z             1.2297     0.6656   1.848   0.0647 .
scale(DS)           0.6742     0.6705   1.006   0.3146  
slope.Z:scale(DS)   0.5111     0.6539   0.782   0.4344  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) slop.Z sc(DS)
slope.Z     0.299               
scale(DS)   0.017  0.063        
slp.Z:s(DS) 0.059  0.012  0.288 
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("slope.Z", "DS")) %>% plot()
Data were 'prettified'. Consider using `terms="slope.Z [all]"` to get smooth plots.

# GLMM: slope.Z × z-scored NFC (NOTE(review): presumably Need for Cognition —
# confirm) predicting ingroup choice. Random slope.Z by subject, random intercept
# by trait; bobyqa with maxfun 1e5. Output below shows a singular fit.
m <- glmer( as.factor(ingChoiceN) ~ slope.Z*scale(NFC) + ( slope.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ slope.Z * scale(NFC) + (slope.Z | subID) +      (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4719.1   4768.7  -2351.5   4703.1     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.3504 -0.9885  0.4707  0.8482  2.6496 

Random effects:
 Groups Name        Variance Std.Dev. Corr
 trait  (Intercept) 0.000    0.000        
 subID  (Intercept) 9.830    3.135        
        slope.Z     9.302    3.050    0.41
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                   Estimate Std. Error z value Pr(>|z|)  
(Intercept)          0.1664     0.6708   0.248   0.8041  
slope.Z              1.1872     0.6637   1.789   0.0737 .
scale(NFC)          -0.3811     0.6609  -0.577   0.5641  
slope.Z:scale(NFC)   0.6284     0.6594   0.953   0.3406  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) slop.Z s(NFC)
slope.Z      0.357              
scale(NFC)  -0.017  0.010       
slp.Z:(NFC)  0.011  0.027  0.344
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("slope.Z", "NFC")) %>% plot()
Data were 'prettified'. Consider using `terms="slope.Z [all]"` to get smooth plots.

# GLMM: slope.Z × z-scored SING.Ind (NOTE(review): looks like an "individual"
# subscale of a SING measure — confirm). Same structure as the sibling fits.
# Singular fit reported below; slope.Z main effect reaches p < .05 in this one.
m <- glmer( as.factor(ingChoiceN) ~ slope.Z*scale(SING.Ind) + ( slope.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ slope.Z * scale(SING.Ind) + (slope.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4717.4   4767.0  -2350.7   4701.4     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.1006 -0.9855  0.4721  0.8500  2.6384 

Random effects:
 Groups Name        Variance Std.Dev. Corr
 trait  (Intercept) 0.000    0.000        
 subID  (Intercept) 8.250    2.872        
        slope.Z     8.988    2.998    0.36
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                        Estimate Std. Error z value Pr(>|z|)  
(Intercept)              0.03639    0.63023   0.058   0.9540  
slope.Z                  1.33378    0.65833   2.026   0.0428 *
scale(SING.Ind)         -1.43648    0.74774  -1.921   0.0547 .
slope.Z:scale(SING.Ind) -0.27897    0.65618  -0.425   0.6707  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) slop.Z s(SING
slope.Z      0.289              
scl(SING.I)  0.103 -0.132       
s.Z:(SING.I -0.124  0.028  0.091
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("slope.Z", "SING.Ind")) %>% plot()
Data were 'prettified'. Consider using `terms="slope.Z [all]"` to get smooth plots.

# GLMM: slope.Z × z-scored SING.Inter (NOTE(review): presumably the "interpersonal"
# counterpart to SING.Ind above — confirm). Random slope.Z by subject, random
# intercept by trait. Singular fit reported in the output below.
m <- glmer( as.factor(ingChoiceN) ~ slope.Z*scale(SING.Inter) + ( slope.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ slope.Z * scale(SING.Inter) + (slope.Z |      subID) + (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4719.2   4768.8  -2351.6   4703.2     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.3813 -0.9853  0.4706  0.8482  2.6105 

Random effects:
 Groups Name        Variance Std.Dev. Corr
 trait  (Intercept) 0.000    0.000        
 subID  (Intercept) 9.447    3.074        
        slope.Z     9.766    3.125    0.37
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                          Estimate Std. Error z value Pr(>|z|)  
(Intercept)                0.23586    0.66492   0.355   0.7228  
slope.Z                    1.14832    0.67768   1.694   0.0902 .
scale(SING.Inter)          0.98467    0.75955   1.296   0.1948  
slope.Z:scale(SING.Inter)  0.08021    0.67349   0.119   0.9052  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) slop.Z s(SING
slope.Z      0.317              
scl(SING.I)  0.112 -0.034       
s.Z:(SING.I -0.049  0.003  0.145
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("slope.Z", "SING.Inter")) %>% plot()
Data were 'prettified'. Consider using `terms="slope.Z [all]"` to get smooth plots.

# GLMM: slope.Z × z-scored Proto (NOTE(review): presumably a prototypicality
# measure — confirm) predicting ingroup choice; same random-effects structure as
# the sibling fits. Singular fit (trait variance ~1e-15) reported below.
m <- glmer( as.factor(ingChoiceN) ~ slope.Z*scale(Proto) + ( slope.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ slope.Z * scale(Proto) + (slope.Z | subID) +      (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4720.7   4770.3  -2352.4   4704.7     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.3935 -0.9845  0.4706  0.8482  2.6074 

Random effects:
 Groups Name        Variance  Std.Dev.  Corr
 trait  (Intercept) 1.223e-15 3.497e-08     
 subID  (Intercept) 1.005e+01 3.170e+00     
        slope.Z     9.616e+00 3.101e+00 0.39
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                     Estimate Std. Error z value Pr(>|z|)  
(Intercept)           0.11611    0.67889   0.171   0.8642  
slope.Z               1.19302    0.67388   1.770   0.0767 .
scale(Proto)         -0.05402    0.72101  -0.075   0.9403  
slope.Z:scale(Proto)  0.23967    0.66898   0.358   0.7201  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) slop.Z scl(P)
slope.Z      0.337              
scale(Prot)  0.029 -0.051       
slp.Z:sc(P) -0.053  0.000  0.270
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("slope.Z", "Proto")) %>% plot()
Data were 'prettified'. Consider using `terms="slope.Z [all]"` to get smooth plots.

# GLMM: slope.Z × z-scored SI (NOTE(review): abbreviation not defined in view —
# confirm). Random slope.Z by subject, random intercept by trait; bobyqa with
# maxfun 1e5. Singular fit reported in the output below.
m <- glmer( as.factor(ingChoiceN) ~ slope.Z*scale(SI) + ( slope.Z | subID) + ( 1 | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ slope.Z * scale(SI) + (slope.Z | subID) +      (1 | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4718.6   4768.2  -2351.3   4702.6     3632 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.1024 -0.9805  0.4712  0.8492  2.6025 

Random effects:
 Groups Name        Variance Std.Dev. Corr
 trait  (Intercept) 0.000    0.000        
 subID  (Intercept) 9.008    3.001        
        slope.Z     9.362    3.060    0.33
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                  Estimate Std. Error z value Pr(>|z|)  
(Intercept)         0.1666     0.6465   0.258   0.7967  
slope.Z             1.1603     0.6651   1.745   0.0811 .
scale(SI)           1.0067     0.6722   1.498   0.1342  
slope.Z:scale(SI)   0.5536     0.6642   0.834   0.4045  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) slop.Z sc(SI)
slope.Z      0.281              
scale(SI)    0.044 -0.025       
slp.Z:s(SI) -0.028 -0.006  0.273
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("slope.Z", "SI")) %>% plot()
Data were 'prettified'. Consider using `terms="slope.Z [all]"` to get smooth plots.

# GLMM: slope.Z × z-scored NTB (NOTE(review): presumably Need to Belong — confirm).
# Unlike the preceding fits, this one also allows a slope.Z random slope by trait
# ((slope.Z | trait) instead of (1 | trait)); that trait slope variance comes back
# near zero and the fit is again singular (see output below).
m <- glmer( as.factor(ingChoiceN) ~ slope.Z*scale(NTB) + ( slope.Z | subID) + ( slope.Z | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ slope.Z * scale(NTB) + (slope.Z | subID) +      (slope.Z | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4723.3   4785.3  -2351.6   4703.3     3630 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.4383 -0.9847  0.4726  0.8491  2.5658 

Random effects:
 Groups Name        Variance Std.Dev. Corr
 trait  (Intercept) 0.000000 0.00000      
        slope.Z     0.001685 0.04104   NaN
 subID  (Intercept) 9.575622 3.09445      
        slope.Z     9.551674 3.09058  0.37
Number of obs: 3640, groups:  trait, 148; subID, 25

Fixed effects:
                   Estimate Std. Error z value Pr(>|z|)  
(Intercept)          0.1747     0.6654   0.263   0.7929  
slope.Z              1.1860     0.6712   1.767   0.0772 .
scale(NTB)           0.8431     0.6661   1.266   0.2056  
slope.Z:scale(NTB)   0.3383     0.6636   0.510   0.6102  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) slop.Z s(NTB)
slope.Z     0.327               
scale(NTB)  0.045  0.003        
slp.Z:(NTB) 0.004  0.012  0.376 
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("slope.Z", "NTB")) %>% plot()
Data were 'prettified'. Consider using `terms="slope.Z [all]"` to get smooth plots.

# Second model series: predictor switches from slope.Z to z-scored trait
# desirability, moderated by RSE; random desirability slopes by BOTH subject and
# trait. Note the output reports only 1604 obs / 11 subjects (rows with missing
# desirability are dropped by glmer), vs 3640 / 25 in the slope.Z series.
# Singular fit reported below (random-effect correlations at the +1 boundary).
m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(RSE) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ scale(desirability) * scale(RSE) + (scale(desirability) |  
    subID) + (scale(desirability) | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2111.4   2165.2  -1045.7   2091.4     1594 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-3.8959 -1.0144  0.4983  0.9101  1.4069 

Random effects:
 Groups Name                Variance Std.Dev. Corr
 trait  (Intercept)         0.003365 0.05801      
        scale(desirability) 0.013385 0.11570  1.00
 subID  (Intercept)         0.415188 0.64435      
        scale(desirability) 0.001739 0.04170  1.00
Number of obs: 1604, groups:  trait, 148; subID, 11

Fixed effects:
                               Estimate Std. Error z value Pr(>|z|)    
(Intercept)                     0.38520    0.20272   1.900  0.05741 .  
scale(desirability)             0.20932    0.06083   3.441  0.00058 ***
scale(RSE)                      0.27336    0.20314   1.346  0.17840    
scale(desirability):scale(RSE)  0.02205    0.05729   0.385  0.70036    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(d) s(RSE)
scl(dsrblt) 0.218               
scale(RSE)  0.019  0.019        
scl():(RSE) 0.018  0.128  0.233 
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("desirability", "RSE")) %>% plot()
Data were 'prettified'. Consider using `terms="desirability [all]"` to get smooth plots.

# GLMM: z-scored desirability × z-scored SCC predicting ingroup choice, with
# desirability random slopes by subject and by trait. Same reduced sample as the
# other desirability fits (1604 obs, 11 subjects per the output). Singular fit
# reported below.
m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(SCC) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ scale(desirability) * scale(SCC) + (scale(desirability) |  
    subID) + (scale(desirability) | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2110.0   2163.8  -1045.0   2090.0     1594 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-3.8913 -1.0078  0.4896  0.9083  1.3630 

Random effects:
 Groups Name                Variance Std.Dev. Corr
 trait  (Intercept)         0.003354 0.05791      
        scale(desirability) 0.013479 0.11610  1.00
 subID  (Intercept)         0.377675 0.61455      
        scale(desirability) 0.003107 0.05574  1.00
Number of obs: 1604, groups:  trait, 148; subID, 11

Fixed effects:
                                Estimate Std. Error z value Pr(>|z|)    
(Intercept)                     0.385673   0.194130   1.987 0.046958 *  
scale(desirability)             0.209174   0.061907   3.379 0.000728 ***
scale(SCC)                      0.337654   0.195663   1.726 0.084403 .  
scale(desirability):scale(SCC) -0.006718   0.060638  -0.111 0.911783    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(d) s(SCC)
scl(dsrblt) 0.280               
scale(SCC)  0.026  0.024        
scl():(SCC) 0.022  0.143  0.293 
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("desirability", "SCC")) %>% plot()
Data were 'prettified'. Consider using `terms="desirability [all]"` to get smooth plots.

# GLMM: desirability × DS (both z-scored) with desirability random slopes by
# subject and trait; bobyqa, maxfun 1e5. Singular fit reported below
# (random-effect correlations pinned at 1.00).
m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(DS) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ scale(desirability) * scale(DS) + (scale(desirability) |  
    subID) + (scale(desirability) | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2110.9   2164.7  -1045.4   2090.9     1594 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-3.7639 -1.0088  0.4900  0.9098  1.4151 

Random effects:
 Groups Name                Variance  Std.Dev. Corr
 trait  (Intercept)         0.0034089 0.05839      
        scale(desirability) 0.0134907 0.11615  1.00
 subID  (Intercept)         0.3893895 0.62401      
        scale(desirability) 0.0009413 0.03068  1.00
Number of obs: 1604, groups:  trait, 148; subID, 11

Fixed effects:
                              Estimate Std. Error z value Pr(>|z|)    
(Intercept)                    0.38260    0.19680   1.944 0.051891 .  
scale(desirability)            0.20787    0.06023   3.451 0.000558 ***
scale(DS)                     -0.29740    0.19610  -1.517 0.129382    
scale(desirability):scale(DS) -0.03279    0.05445  -0.602 0.547033    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(d) sc(DS)
scl(dsrblt)  0.167              
scale(DS)   -0.015 -0.017       
scl(d):(DS) -0.015 -0.103  0.179
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("desirability", "DS")) %>% plot()
Data were 'prettified'. Consider using `terms="desirability [all]"` to get smooth plots.

# GLMM: desirability × NFC (both z-scored), desirability random slopes by subject
# and trait. The desirability main effect is again the only clearly reliable fixed
# effect in the output below; fit is singular.
m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(NFC) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ scale(desirability) * scale(NFC) + (scale(desirability) |  
    subID) + (scale(desirability) | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2112.6   2166.4  -1046.3   2092.6     1594 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-3.8577 -1.0118  0.4889  0.9094  1.3512 

Random effects:
 Groups Name                Variance Std.Dev. Corr
 trait  (Intercept)         0.003355 0.05792      
        scale(desirability) 0.013450 0.11598  1.00
 subID  (Intercept)         0.479426 0.69241      
        scale(desirability) 0.002268 0.04762  1.00
Number of obs: 1604, groups:  trait, 148; subID, 11

Fixed effects:
                               Estimate Std. Error z value Pr(>|z|)    
(Intercept)                     0.38517    0.21664   1.778 0.075415 .  
scale(desirability)             0.20871    0.06126   3.407 0.000657 ***
scale(NFC)                     -0.09903    0.21536  -0.460 0.645636    
scale(desirability):scale(NFC)  0.01942    0.05410   0.359 0.719619    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(d) s(NFC)
scl(dsrblt)  0.244              
scale(NFC)  -0.008 -0.007       
scl():(NFC) -0.008 -0.060  0.264
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("desirability", "NFC")) %>% plot()
Data were 'prettified'. Consider using `terms="desirability [all]"` to get smooth plots.

# GLMM: desirability × SING.Ind (both z-scored), desirability random slopes by
# subject and trait; same control settings as the sibling fits. Singular fit
# reported in the output below.
m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(SING.Ind) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ scale(desirability) * scale(SING.Ind) +  
    (scale(desirability) | subID) + (scale(desirability) | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2112.7   2166.5  -1046.4   2092.7     1594 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-3.8602 -1.0147  0.4910  0.9059  1.3673 

Random effects:
 Groups Name                Variance Std.Dev. Corr
 trait  (Intercept)         0.003337 0.05777      
        scale(desirability) 0.013431 0.11589  1.00
 subID  (Intercept)         0.486231 0.69730      
        scale(desirability) 0.002218 0.04710  1.00
Number of obs: 1604, groups:  trait, 148; subID, 11

Fixed effects:
                                    Estimate Std. Error z value Pr(>|z|)    
(Intercept)                          0.38508    0.21806   1.766 0.077406 .  
scale(desirability)                  0.20921    0.06118   3.419 0.000628 ***
scale(SING.Ind)                      0.05346    0.21663   0.247 0.805083    
scale(desirability):scale(SING.Ind) -0.02489    0.05367  -0.464 0.642820    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(d) s(SING
scl(dsrblt) 0.242               
scl(SING.I) 0.004  0.003        
s():(SING.I 0.003  0.022  0.263 
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("desirability", "SING.Ind")) %>% plot()
Data were 'prettified'. Consider using `terms="desirability [all]"` to get smooth plots.

# GLMM: desirability × SING.Inter (both z-scored), desirability random slopes by
# subject and trait. Output below shows the trait intercept variance collapsing to
# exactly zero — another singular fit.
m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(SING.Inter) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ scale(desirability) * scale(SING.Inter) +  
    (scale(desirability) | subID) + (scale(desirability) | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2110.4   2164.2  -1045.2   2090.4     1594 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-3.8256 -1.0104  0.4934  0.9125  1.4150 

Random effects:
 Groups Name                Variance  Std.Dev. Corr
 trait  (Intercept)         0.0000000 0.00000      
        scale(desirability) 0.0093185 0.09653   NaN
 subID  (Intercept)         0.3727206 0.61051      
        scale(desirability) 0.0009385 0.03063  1.00
Number of obs: 1604, groups:  trait, 148; subID, 11

Fixed effects:
                                      Estimate Std. Error z value Pr(>|z|)    
(Intercept)                            0.38345    0.19280   1.989 0.046716 *  
scale(desirability)                    0.20642    0.05956   3.466 0.000529 ***
scale(SING.Inter)                     -0.33236    0.19353  -1.717 0.085910 .  
scale(desirability):scale(SING.Inter) -0.03516    0.05701  -0.617 0.537466    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(d) s(SING
scl(dsrblt)  0.162              
scl(SING.I) -0.022 -0.021       
s():(SING.I -0.020 -0.138  0.177
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("desirability", "SING.Inter")) %>% plot()
Data were 'prettified'. Consider using `terms="desirability [all]"` to get smooth plots.

# GLMM: desirability × Proto (both z-scored), desirability random slopes by
# subject and trait; bobyqa, maxfun 1e5. Singular fit reported below; the Proto
# main effect is marginal (p ≈ .057) in this sample.
m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(Proto) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ scale(desirability) * scale(Proto) +  
    (scale(desirability) | subID) + (scale(desirability) | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2109.2   2163.0  -1044.6   2089.2     1594 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.3834 -1.0152  0.5623  0.9062  1.4029 

Random effects:
 Groups Name                Variance Std.Dev. Corr
 trait  (Intercept)         0.003279 0.05726      
        scale(desirability) 0.013172 0.11477  1.00
 subID  (Intercept)         0.374479 0.61195      
        scale(desirability) 0.001273 0.03567  1.00
Number of obs: 1604, groups:  trait, 148; subID, 11

Fixed effects:
                                 Estimate Std. Error z value Pr(>|z|)    
(Intercept)                       0.39167    0.19357   2.023 0.043030 *  
scale(desirability)               0.21533    0.06089   3.536 0.000406 ***
scale(Proto)                     -0.38362    0.20188  -1.900 0.057402 .  
scale(desirability):scale(Proto) -0.07237    0.07165  -1.010 0.312464    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(d) scl(P)
scl(dsrblt)  0.194              
scale(Prot) -0.043 -0.047       
scl(ds):(P) -0.038 -0.225  0.229
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("desirability", "Proto")) %>% plot()
Data were 'prettified'. Consider using `terms="desirability [all]"` to get smooth plots.

# GLMM: desirability × SI (both z-scored), desirability random slopes by subject
# and trait; same control settings as the sibling fits. Singular fit reported in
# the output below.
m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(SI) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ scale(desirability) * scale(SI) + (scale(desirability) |  
    subID) + (scale(desirability) | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2110.7   2164.5  -1045.4   2090.7     1594 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.0641 -1.0169  0.5313  0.9074  1.5565 

Random effects:
 Groups Name                Variance Std.Dev. Corr
 trait  (Intercept)         0.003313 0.05756      
        scale(desirability) 0.013506 0.11621  1.00
 subID  (Intercept)         0.492358 0.70168      
        scale(desirability) 0.002593 0.05092  1.00
Number of obs: 1604, groups:  trait, 148; subID, 11

Fixed effects:
                              Estimate Std. Error z value Pr(>|z|)    
(Intercept)                    0.38629    0.21938   1.761 0.078272 .  
scale(desirability)            0.21359    0.06156   3.470 0.000521 ***
scale(SI)                     -0.05538    0.21816  -0.254 0.799632    
scale(desirability):scale(SI) -0.08211    0.05464  -1.503 0.132865    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(d) sc(SI)
scl(dsrblt)  0.260              
scale(SI)   -0.008 -0.009       
scl(d):(SI) -0.009 -0.079  0.280
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("desirability", "SI")) %>% plot()
Data were 'prettified'. Consider using `terms="desirability [all]"` to get smooth plots.

# Final fit of the desirability moderator series: desirability × NTB (both
# z-scored), desirability random slopes by subject and trait. Singular fit
# reported below. NOTE(review): these nine near-identical fits could be driven by
# a loop/map over moderator names, but that would restructure this transcript.
m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(NTB) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
boundary (singular) fit: see help('isSingular')
summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ scale(desirability) * scale(NTB) + (scale(desirability) |  
    subID) + (scale(desirability) | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2112.8   2166.6  -1046.4   2092.8     1594 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-3.9039 -1.0131  0.5005  0.9074  1.4211 

Random effects:
 Groups Name                Variance Std.Dev. Corr
 trait  (Intercept)         0.00337  0.05805      
        scale(desirability) 0.01338  0.11569  1.00
 subID  (Intercept)         0.47789  0.69130      
        scale(desirability) 0.00211  0.04593  1.00
Number of obs: 1604, groups:  trait, 148; subID, 11

Fixed effects:
                               Estimate Std. Error z value Pr(>|z|)    
(Intercept)                     0.38539    0.21633   1.782  0.07483 .  
scale(desirability)             0.20947    0.06112   3.427  0.00061 ***
scale(NTB)                     -0.11353    0.21542  -0.527  0.59819    
scale(desirability):scale(NTB) -0.01384    0.05480  -0.253  0.80060    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) scl(d) s(NTB)
scl(dsrblt)  0.237              
scale(NTB)  -0.010 -0.010       
scl():(NTB) -0.009 -0.075  0.255
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
ggpredict(m, c("desirability", "NTB")) %>% plot()
Data were 'prettified'. Consider using `terms="desirability [all]"` to get smooth plots.

summary(m)
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: binomial  ( logit )
Formula: as.factor(ingChoiceN) ~ predicted.Z * novel * scale(NFC) + (predicted.Z +  
    novel | subID) + (SE.Z * as.factor(novel) | trait)
   Data: fullTest
Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  4476.7   4623.6  -2214.3   4428.7     3348 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-2.2596 -1.0499  0.6322  0.8416  2.2666 

Random effects:
 Groups Name                          Variance Std.Dev. Corr             
 trait  (Intercept)                   0.000000 0.00000                   
        SE.Z                          0.001061 0.03257    NaN            
        as.factor(novel)Held Out      0.002307 0.04803    NaN -1.00      
        SE.Z:as.factor(novel)Held Out 0.038538 0.19631    NaN -1.00  1.00
 subID  (Intercept)                   0.257661 0.50760                   
        predicted.Z                   0.008917 0.09443  -1.00            
        novelHeld Out                 0.002718 0.05214  -1.00  1.00      
Number of obs: 3372, groups:  trait, 148; subID, 25

Fixed effects:
                                      Estimate Std. Error z value Pr(>|z|)   
(Intercept)                           0.298628   0.116464   2.564  0.01034 * 
predicted.Z                           0.318133   0.123109   2.584  0.00976 **
novelHeld Out                         0.031633   0.078956   0.401  0.68869   
scale(NFC)                           -0.101568   0.117287  -0.866  0.38650   
predicted.Z:novelHeld Out            -0.007964   0.083210  -0.096  0.92376   
predicted.Z:scale(NFC)                0.225593   0.143761   1.569  0.11660   
novelHeld Out:scale(NFC)              0.037730   0.075930   0.497  0.61925   
predicted.Z:novelHeld Out:scale(NFC)  0.108722   0.096644   1.125  0.26060   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
            (Intr) prdc.Z nvlHlO s(NFC) pr.Z:HO p.Z:(N nHO:(N
predicted.Z -0.260                                           
novelHeldOt -0.409  0.048                                    
scale(NFC)  -0.018  0.017 -0.036                             
prdctd.Z:HO  0.057 -0.424 -0.001  0.025                      
prd.Z:(NFC)  0.097  0.270 -0.001 -0.400 -0.138               
nvlHO:(NFC) -0.040  0.027  0.084 -0.393 -0.037   0.129       
p.Z:HO:(NFC  0.003 -0.139  0.046  0.124  0.312  -0.405 -0.259
optimizer (bobyqa) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# SE.Z x novelty x individual-difference moderation battery.
# The eight copy-pasted fits were collapsed into one loop; all models share the
# same random-effects structure, so only the moderator scale varies.
# FIX: the Proto plot previously omitted the "novel" term (copy-paste slip),
# and every plot requested "SE" although the model term is "SE.Z".
seModerators <- c("SCC", "DS", "NFC", "SING.Ind", "SING.Inter", "Proto", "SI", "NTB")
for (sc in seModerators) {
  frm <- as.formula(paste0(
    "as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(", sc,
    ") + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait)"
  ))
  m <- glmer(frm, data = fullTest, family = binomial,
             control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
             nAGQ = 1)
  print(summary(m))
  print(ggpredict(m, c("SE.Z", "novel", sc)) %>% plot())
}
---
title: "R Notebook"
output: html_notebook
---

```{r}
library(lmerTest)
library(ggeffects)
library(dplyr)
library(report)
library(r2glmm)
library(performance)
library(broom.mixed)
```

```{r}
# Load the test- and train-phase data produced by the cleaning pipeline.
fullTest <- read.csv("../Cleaning/output/fullTest.csv")
fullTrain <- read.csv("../Cleaning/output/fullTrain.csv")

# Per-trait choice frequencies; rename so `props` holds the choice proportion.
traitsFreqs <- read.csv("../Cleaning/output/traitFreqOverUnder.csv")
traitsFreqs <- rename(traitsFreqs, props = optionChoiceN)
# NOTE(review): merge() re-sorts rows by `trait`; downstream code must not rely
# on the original row order of fullTest.
fullTest <- merge(fullTest, traitsFreqs[c("trait","props")], by = "trait")
# Flip the proportion for underestimators so propCorr is aligned with the
# subject's estimator type.
fullTest$propCorr <- ifelse(fullTest$Estimator=="Underestimator", 1-fullTest$props, fullTest$props)

uSubs <- unique(fullTest$subID)

# One row per subject — used for subject-level individual-difference analyses.
indDiffs <- fullTest[!duplicated(fullTest$subID),]
```

```{r}
# Convert the binary outcome/novelty columns to factors and z-score every
# continuous predictor. Each <col>.Z column is the standardized <col>.
fullTest$ingChoiceN <- as.factor(fullTest$ingChoiceN)
fullTest$novel <- as.factor(fullTest$novel)

zCols <- c("selfResp", "SE", "iSE", "oSE", "predicted", "slope", "entropy",
           "WSR", "neighAveOutSE", "neighAveAllSE", "neighAveInSE")
for (cl in zCols) {
  fullTest[[paste0(cl, ".Z")]] <- scale(fullTest[[cl]])
}
```


```{r}
fullTest$novel <- as.factor(fullTest$novel)
levels(fullTest$novel) <- list("Trained"  = "0", "Held Out" = "1")
```


```{r}
# prop.test(traitsFreqs$optionChoiceN, traitsFreqs$N, p=rep(.5,length(traitsFreqs$N)))
# 
# m <- glmer( ingChoiceN ~ trait + ( 1 | subID) , data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
#                                     optCtrl = list(maxfun = 100000)),
#     nAGQ = 1)
# 
# fullTest$trait <- as.factor(fullTest$trait)
# contrasts(fullTest$trait) <- contr.sum(148)
# m <- glm(optionChoiceN ~ trait, family = binomial,
#           data = fullTest
#          )
# summary(m)
```


```{r}
# traitsFreqs$trait <- as.factor(traitsFreqs$trait)
# contrasts(traitsFreqs$trait) <- contr.sum(148)
# m <- glm(optionChoiceN ~ 1, family = binomial,
#           data = traitsFreqs
#          )
# summary(m)
# check_overdispersion(m)
# check_model(m)
# 
# m <- glm(optionChoiceN ~ trait, family = quasibinomial,
#           data = traitsFreqs
#          )
# check_overdispersion(m)
# check_model(m)
# 
# t.test()
# 
# m <- glm(optionChoiceN ~ trait, family = poisson,
#           data = traitsFreqs
#          )
# check_overdispersion(m)
# check_model(m)
```

```{r}
# Per-trait one-sample t-tests: does the ingroup-choice rate differ from 50%?
# Generalized: the trait count is derived from the data instead of the
# hard-coded 148, and the loop uses seq_len().
nTraits <- length(unique(fullTest$Idx))
propMatrix <- matrix(nrow = nTraits, ncol = 7)
for (i in seq_len(nTraits)) {
    traitDf <- subset(fullTest, Idx == i)
    # ingChoiceN is a factor with levels "0"/"1"; as.numeric() - 1 recovers 0/1.
    test <- t.test(as.numeric(traitDf$ingChoiceN) - 1, mu = .50)
    # One row per trait: index, t, p, the two CI bounds, estimate, df.
    propMatrix[i, ] <- c(i, test$statistic, test$p.value, test$conf.int, test$estimate, test$parameter)
}
colnames(propMatrix) <- c("Idx", "stat", "p", "LCI", "UCI", "est", "param")
propMatrix <- as.data.frame(propMatrix)
# NOTE(review): assumes traitsFreqs rows are ordered by Idx — confirm upstream.
propMatrix$trait <- traitsFreqs$trait
propMatrix <- propMatrix[order(propMatrix$p),]
```

```{r}
library(corrr)
# Correlate group homophily with each individual-difference scale.
x <- indDiffs %>% 
    select(groupHomoph, DS:SING.Inter) %>%
  correlate() %>% 
  focus(groupHomoph) %>%
    arrange(groupHomoph)

# Bar plot of the correlations, ordered by strength.
# FIX: axis.text.x was set twice in the original (angle 90, then angle 45);
# only the second took effect, so the dead first call was removed and the
# remaining theme() calls were consolidated.
x %>% 
  mutate(rowname = factor(rowname, levels = rowname[order(groupHomoph)])) %>%  # Order by correlation strength
  ggplot(aes(x = rowname, y = groupHomoph)) +
    geom_bar(stat = "identity") +
    ylab("Correlation Coefficient") +
    xlab("Individual Differences") +
    theme_grey(base_size = 9) +
    theme(axis.text = element_text(size = 9),
          axis.text.x = element_text(size = 9, angle = 45, vjust = 1),
          axis.title = element_text(size = 9, face = "bold"),
          axis.title.x = element_text(vjust = 1.9),
          legend.text = element_text(size = 9),
          legend.title = element_blank(),
          panel.border = element_rect(colour = "black", fill = NA, size = 1),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.background = element_blank(),
          axis.line = element_line(colour = "black"))
```
# Familiarity predicts Reaction Time

```{r}
# Log reaction time as a function of familiarity, with by-subject familiarity
# slopes and by-trait random intercepts.
rtForm <- log(RT) ~ fam + (fam | subID) + (1 | trait)
m <- lmer(rtForm, data = fullTest)
summary(m)
ggpredict(m, c("fam")) %>% plot()
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
```

```{r}
# Same RT model, adding trait base-rate (propCorr) and desirability covariates.
rtCovForm <- log(RT) ~ fam + propCorr + desirability + (fam | subID) + (1 | trait)
m <- lmer(rtCovForm, data = fullTest)
summary(m)
ggpredict(m, c("fam")) %>% plot()
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
```


# Replication of prior self-anchoring findings: Self-evaluations predicting ingroup evaluations

```{r}
# Self-anchoring replication: self-descriptiveness (selfResp.Z) predicting
# ingroup choice, controlling for trait base rate and desirability.
m <- glmer(
  ingChoiceN ~ selfResp.Z + propCorr + desirability + (selfResp.Z | subID) + (1 | trait),
  data = fullTest, family = binomial,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)),
  nAGQ = 1
)
summary(m)
# broom.mixed is already attached in the setup chunk, so the redundant
# library(broom.mixed) call was removed.
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")  # odds ratios + CIs
r2beta(m)  # semi-partial R^2 per fixed effect
ggpredict(m, c("selfResp.Z")) %>% plot(show.title=F) + xlab("Self-Descriptiveness") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/SelfProjection.tiff",dpi=600)
```

# Does similarity-weighted self-evaluation average predict ingroup choices?

```{r}
# Similarity-weighted self-evaluation average (WSR.Z) predicting ingroup
# choice, with the usual base-rate and desirability covariates.
m <- glmer(
  ingChoiceN ~ WSR.Z + propCorr + desirability + (WSR.Z | subID) + (1 | trait),
  data = fullTest, family = binomial,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)),
  nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
r2beta(m)
```
# Does self-evaluation weighted similarity predict ingroup choices?

```{r}
# Self-evaluation-weighted similarity (SE.Z) predicting ingroup choice.
# NOTE(review): unlike sibling models this one has no (1 | trait) term —
# confirm that is intentional.
m <- glmer(
  ingChoiceN ~ SE.Z + propCorr + desirability + (SE.Z | subID),
  data = fullTest, family = binomial,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)),
  nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
r2beta(m)
```

# Do cross-validated similarity*self-evaluation predictions predict ingroup choices? 

```{r}
# Cross-validated similarity*self-evaluation predictions (predicted.Z)
# predicting ingroup choice.
m <- glmer(
  ingChoiceN ~ predicted.Z + propCorr + desirability + (predicted.Z | subID) + (1 | trait),
  data = fullTest, family = binomial,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)),
  nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
r2beta(m)
ggpredict(m, c("predicted.Z")) %>% plot(show.title=F) + xlab("Cross-Validated Self-Descriptiveness Predictions") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/CVprediction.tiff",dpi=600)
```

# Do cross-validated similarity*self-evaluation predictions predict ingroup choices, regardless of whether it was seen prior or not?

```{r}
# Does the cross-validated prediction effect generalize to held-out traits?
# (predicted.Z x novelty interaction.)
m <- glmer(
  ingChoiceN ~ predicted.Z * novel + (predicted.Z + novel | subID),
  data = fullTest, family = binomial,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)),
  nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
r2beta(m)
ggpredict(m, c("predicted.Z", "novel")) %>% plot(show.title=F)+ xlab("Cross-Validated Self-Descriptiveness") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()  + scale_color_discrete(labels = c("Trained","Held-Out"))
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/CVpredictionGeneralize.tiff",dpi=600)
```

# Does generalization depend on outdegree?

```{r}
# Three-way interaction: does generalization of the cross-validated prediction
# effect depend on trait outdegree?
m <- glmer( ingChoiceN ~ predicted.Z * novel * outDegree  + ( predicted.Z + novel | subID) , data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
tidy(m,conf.int=TRUE,exponentiate=TRUE,effects="fixed")
r2beta(m)
ggpredict(m, c("predicted.Z", "outDegree" ,"novel")) %>% plot(show.title=F)+ xlab("Cross-Validated Self-Descriptiveness") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()
# NOTE(review): this overwrites the CVpredictionGeneralize.tiff written by the
# previous chunk — confirm the filename is intended.
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/CVpredictionGeneralize.tiff",dpi=600)
```
# Does generalization depend on indegree?

Nope

```{r}
# Three-way interaction: does generalization depend on trait indegree?
# (Per the heading note, no effect was found.)
m <- glmer(
  ingChoiceN ~ predicted.Z * novel * inDegree + (predicted.Z + novel | subID),
  data = fullTest, family = binomial,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)),
  nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
r2beta(m)
# FIX: the plot previously requested "outDegree", which is not a term in this
# model (copy-paste from the outdegree chunk); use "inDegree".
ggpredict(m, c("predicted.Z", "inDegree" ,"novel")) %>% plot(show.title=F)+ xlab("Cross-Validated Self-Descriptiveness") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()
#ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/CVpredictionGeneralize.tiff",dpi=600)
```

# Neighboring Dependencies Predicting Choices

```{r}
# Average self-evaluation of outward neighbors (neighAveOutSE.Z) predicting
# ingroup choice, with base-rate and desirability covariates.
m <- glmer(
  ingChoiceN ~ neighAveOutSE.Z + propCorr + desirability +
    (neighAveOutSE.Z | subID) + (1 | trait),
  data = fullTest, family = binomial,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)),
  nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
r2beta(m)
ggpredict(m, c("neighAveOutSE.Z")) %>% plot(show.title=F)+ xlab("Outwards Neighboring Self-Evaluations") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/outdegreeNeighbors.tiff",dpi=600)
```

# Generalization of Outdegree Neighboring Self-Evaluations

```{r}
# Does the outward-neighbor effect generalize to held-out traits?
m <- glmer(
  ingChoiceN ~ neighAveOutSE.Z * novel + (neighAveOutSE.Z + novel | subID) + (1 | trait),
  data = fullTest, family = binomial,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)),
  nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
r2beta(m)
ggpredict(m, c("neighAveOutSE.Z","novel")) %>% plot(show.title=F)+ xlab("Outwards Neighboring Self-Evaluations") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/outdegreeNeighborsGeneralization.tiff",dpi=600)
```

# Does entropy (i.e., uncertainty) predict likelihood of ingroup choices?

```{r}
# Entropy (uncertainty of the similarity-based probabilities) predicting
# ingroup choice.
m <- glmer(
  ingChoiceN ~ entropy.Z + propCorr + desirability + (entropy.Z | subID) + (1 | trait),
  data = fullTest, family = binomial,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)),
  nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
r2beta(m)
ggpredict(m, c("entropy.Z")) %>% plot(show.title=F) + xlab("Uncertainty") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/Uncertainty.tiff",dpi=600)
```

# Does a linear trend of similarity-based probabilities predict ingroup choices?

```{r}
# Linear trend of similarity-based probabilities (slope.Z) predicting ingroup
# choice.
m <- glmer(
  ingChoiceN ~ slope.Z + propCorr + desirability + (slope.Z | subID) + (1 | trait),
  data = fullTest, family = binomial,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)),
  nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
ggpredict(m, c("slope.Z")) %>% plot(show.title=F) + xlab("Linear Trend of Greater Self-Descriptiveness") + ylab("Likelihood of Ingroup Choice") + jtools::theme_apa()
ggsave("~/Documents/UC Riverside/Studies/Self-Anchoring/Figures/Slope.tiff",dpi=600)
```

# Does a linear trend of similarity-based probabilities predict ingroup choices, controlling for self-descriptiveness?

```{r}
# Slope effect while controlling for raw self-descriptiveness.
m <- glmer(
  ingChoiceN ~ scale(slope) + selfResp.Z + (scale(slope) + selfResp.Z | subID) + (1 | trait),
  data = fullTest, family = binomial,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)),
  nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
```

# Does a non-parametric trend of similarity-based probabilities predict ingroup choices, controlling for self-descriptiveness?

```{r}
# Non-parametric trend (nlslope) while controlling for self-descriptiveness.
m <- glmer(
  ingChoiceN ~ scale(nlslope) + selfResp.Z + (scale(nlslope) + selfResp.Z | subID) + (1 | trait),
  data = fullTest, family = binomial,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)),
  nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
```

# Does a linear trend of similarity-based probabilities predict ingroup choices, controlling for cross-validated predictions?

```{r}
# Slope and cross-validated prediction entered jointly.
m <- glmer(
  ingChoiceN ~ slope.Z + predicted.Z + (slope.Z + predicted.Z | subID) + (1 | trait),
  data = fullTest, family = binomial,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)),
  nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
```

# Backwards solution: Can you predict self-evaluations from similarity to ingroup and outgroup choices?

```{r}
# Backwards solution: predict self-evaluations from similarity to the ingroup
# and outgroup choices (training data).
backForm <- scale(selfResp) ~ scale(inGsim) + scale(outGsim) +
  (scale(inGsim) + scale(outGsim) | subID) + (1 | trait)
m <- lmer(backForm, data = fullTrain)
summary(m)
tidy(m, conf.int = TRUE, effects = "fixed")
ggpredict(m, c("inGsim")) %>% plot(show.title=F) + xlab("Similarity to Ingroup Choices") + ylab("Self-Evaluation") + jtools::theme_apa()
```

```{r}
# Slope x novelty interaction.
m <- glmer(
  ingChoiceN ~ scale(slope) * novel + (scale(slope) + novel | subID) + (1 | trait),
  data = fullTest, family = binomial,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)),
  nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
```

```{r}
# eSE variant predicting ingroup choice (by-subject random slope only).
m <- glmer(
  ingChoiceN ~ eSE + (eSE | subID),
  data = fullTest, family = binomial,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)),
  nAGQ = 1
)
summary(m)
```


```{r}
# sSE variant predicting ingroup choice.
m <- glmer(
  ingChoiceN ~ sSE + (sSE | subID) + (1 | trait),
  data = fullTest, family = binomial,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)),
  nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
r2beta(m)
```

```{r}
# SE.Z x novelty interaction with by-trait random intercepts.
m <- glmer(
  ingChoiceN ~ SE.Z * novel + (SE.Z + novel | subID) + (1 | trait),
  data = fullTest, family = binomial,
  control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)),
  nAGQ = 1
)
summary(m)
tidy(m, conf.int = TRUE, exponentiate = TRUE, effects = "fixed")
r2beta(m)
```


```{r}
# A set of SE-based variants: SE.Z + desirability, the oSE and iSE columns,
# and the SE.Z x novelty interaction with and without a desirability covariate.
bobyqaCtrl <- glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000))

m <- glmer(as.factor(ingChoiceN) ~ SE.Z + scale(desirability) +
             (SE.Z | subID) + (1 | trait),
           data = fullTest, family = binomial, control = bobyqaCtrl, nAGQ = 1)
summary(m)

# oSE variant.
m <- glmer(as.factor(ingChoiceN) ~ scale(oSE) + (scale(oSE) | subID) + (1 | trait),
           data = fullTest, family = binomial, control = bobyqaCtrl, nAGQ = 1)
summary(m)

# iSE variant.
m <- glmer(as.factor(ingChoiceN) ~ scale(iSE) + (scale(iSE) | subID) + (1 | trait),
           data = fullTest, family = binomial, control = bobyqaCtrl, nAGQ = 1)
summary(m)

# SE.Z x novelty.
m <- glmer(as.factor(ingChoiceN) ~ SE.Z*as.factor(novel) +
             (SE.Z + as.factor(novel) | subID) + (1 | trait),
           data = fullTest, family = binomial, control = bobyqaCtrl, nAGQ = 1)
summary(m)

# SE.Z x novelty, controlling for desirability.
m <- glmer(as.factor(ingChoiceN) ~ SE.Z*as.factor(novel) + scale(desirability) +
             (SE.Z + as.factor(novel) + scale(desirability) | subID) + (1 | trait),
           data = fullTest, family = binomial, control = bobyqaCtrl, nAGQ = 1)
summary(m)
# FIX: the plot previously requested "SE", but the model term is "SE.Z".
ggpredict(m, c("SE.Z", "novel")) %>% plot()
```

```{r}
# Individual-difference moderation battery for the cross-validated prediction
# effect. Nine copy-pasted fits collapsed into one loop to remove the
# copy-paste class of error (cf. the mismatched plot terms elsewhere in this
# file).
predModerators <- c("RSE", "SCC", "DS", "NFC", "SING.Ind", "SING.Inter",
                    "Proto", "SI", "NTB")
for (sc in predModerators) {
  # Only the NTB model had a by-trait random predicted.Z slope; preserved.
  traitRE <- if (sc == "NTB") "( predicted.Z | trait)" else "( 1 | trait)"
  frm <- as.formula(paste0(
    "as.factor(ingChoiceN) ~ predicted.Z*scale(", sc,
    ") + ( predicted.Z | subID) + ", traitRE
  ))
  m <- glmer(frm, data = fullTest, family = binomial,
             control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
             nAGQ = 1)
  print(summary(m))
  print(ggpredict(m, c("predicted.Z", sc)) %>% plot())
}
```

```{r}
# Individual-difference moderation battery for the entropy (uncertainty)
# effect; collapsed from nine copy-pasted fits into one loop.
entModerators <- c("RSE", "SCC", "DS", "NFC", "SING.Ind", "SING.Inter",
                   "Proto", "SI", "NTB")
for (sc in entModerators) {
  # Only the NTB model had a by-trait random entropy.Z slope; preserved.
  traitRE <- if (sc == "NTB") "( entropy.Z | trait)" else "( 1 | trait)"
  frm <- as.formula(paste0(
    "as.factor(ingChoiceN) ~ entropy.Z*scale(", sc,
    ") + ( entropy.Z | subID) + ", traitRE
  ))
  m <- glmer(frm, data = fullTest, family = binomial,
             control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
             nAGQ = 1)
  print(summary(m))
  print(ggpredict(m, c("entropy.Z", sc)) %>% plot())
}
```

```{r}
# Individual-difference moderation battery for the slope effect; collapsed
# from nine copy-pasted fits into one loop.
slopeModerators <- c("RSE", "SCC", "DS", "NFC", "SING.Ind", "SING.Inter",
                     "Proto", "SI", "NTB")
for (sc in slopeModerators) {
  # Only the NTB model had a by-trait random slope.Z slope; preserved.
  traitRE <- if (sc == "NTB") "( slope.Z | trait)" else "( 1 | trait)"
  frm <- as.formula(paste0(
    "as.factor(ingChoiceN) ~ slope.Z*scale(", sc,
    ") + ( slope.Z | subID) + ", traitRE
  ))
  m <- glmer(frm, data = fullTest, family = binomial,
             control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
             nAGQ = 1)
  print(summary(m))
  print(ggpredict(m, c("slope.Z", sc)) %>% plot())
}
```

```{r}
# Desirability x individual-difference moderation battery (RSE..SING.Inter),
# collapsed from six copy-pasted fits into one loop. The remaining fits in
# this chunk follow below unchanged.
desModerators <- c("RSE", "SCC", "DS", "NFC", "SING.Ind", "SING.Inter")
for (sc in desModerators) {
  frm <- as.formula(paste0(
    "as.factor(ingChoiceN) ~ scale(desirability)*scale(", sc,
    ") + ( scale(desirability) | subID) + ( scale(desirability) | trait)"
  ))
  m <- glmer(frm, data = fullTest, family = binomial,
             control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
             nAGQ = 1)
  print(summary(m))
  print(ggpredict(m, c("desirability", sc)) %>% plot())
}

m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(Proto) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("desirability", "Proto")) %>% plot()

m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(SI) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("desirability", "SI")) %>% plot()

m <- glmer( as.factor(ingChoiceN) ~ scale(desirability)*scale(NTB) + ( scale(desirability) | subID) + ( scale(desirability) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
ggpredict(m, c("desirability", "NTB")) %>% plot()
```

```{r}
# Does the model-predicted self-evaluation interact with trained/held-out
# status and Need for Cognition in predicting ingroup trait choice?
# NOTE(review): the by-trait random term (SE.Z*as.factor(novel) | trait)
# references SE.Z, not the fixed-effect predictor predicted.Z — this looks
# like a leftover from the SE.Z chunks below; confirm it is intentional.
m <- glmer( as.factor(ingChoiceN) ~ predicted.Z*novel*scale(NFC) + ( predicted.Z+novel | subID) + ( SE.Z*as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
# FIX: the original plotted c("SE", "novel", "RSE"), but neither SE nor RSE
# is a term in this model; plot the fitted fixed-effect terms instead.
ggpredict(m, c("predicted.Z", "novel", "NFC")) %>% plot()
```

```{r}
# Ingroup choice as a function of z-scored self-evaluation (SE.Z),
# trained vs. held-out status, and Self-Concept Clarity, with random
# slopes for SE.Z and novelty by subject and by trait.
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(SCC) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
# FIX: the model term is the pre-scaled column "SE.Z", not "SE";
# ggpredict() matches term names in the model, so "SE" is not found.
ggpredict(m, c("SE.Z", "novel", "SCC")) %>% plot()
```

```{r}
# Ingroup choice as a function of z-scored self-evaluation (SE.Z),
# trained vs. held-out status, and Dialectical Self, with random
# slopes for SE.Z and novelty by subject and by trait.
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(DS) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
# FIX: the model term is the pre-scaled column "SE.Z", not "SE";
# ggpredict() matches term names in the model, so "SE" is not found.
ggpredict(m, c("SE.Z", "novel", "DS")) %>% plot()
```

```{r}
# Ingroup choice as a function of z-scored self-evaluation (SE.Z),
# trained vs. held-out status, and Need for Cognition, with random
# slopes for SE.Z and novelty by subject and by trait.
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(NFC) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
# FIX: the model term is the pre-scaled column "SE.Z", not "SE";
# ggpredict() matches term names in the model, so "SE" is not found.
ggpredict(m, c("SE.Z", "novel", "NFC")) %>% plot()
```

```{r}
# Ingroup choice as a function of z-scored self-evaluation (SE.Z),
# trained vs. held-out status, and independent self-construal, with
# random slopes for SE.Z and novelty by subject and by trait.
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(SING.Ind) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
# FIX: the model term is the pre-scaled column "SE.Z", not "SE";
# ggpredict() matches term names in the model, so "SE" is not found.
ggpredict(m, c("SE.Z", "novel", "SING.Ind")) %>% plot()
```

```{r}
# Ingroup choice as a function of z-scored self-evaluation (SE.Z),
# trained vs. held-out status, and interdependent self-construal, with
# random slopes for SE.Z and novelty by subject and by trait.
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(SING.Inter) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
# FIX: the model term is the pre-scaled column "SE.Z", not "SE";
# ggpredict() matches term names in the model, so "SE" is not found.
ggpredict(m, c("SE.Z", "novel", "SING.Inter")) %>% plot()
```

```{r}
# Ingroup choice as a function of z-scored self-evaluation (SE.Z),
# trained vs. held-out status, and prototypicality, with random
# slopes for SE.Z and novelty by subject and by trait.
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(Proto) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
# FIX: (1) the model term is "SE.Z", not "SE"; (2) every sibling chunk also
# conditions the plot on "novel", which was dropped here — restored for
# consistency with the fitted three-way interaction.
ggpredict(m, c("SE.Z", "novel", "Proto")) %>% plot()
```

```{r}
# Ingroup choice as a function of z-scored self-evaluation (SE.Z),
# trained vs. held-out status, and social identification, with random
# slopes for SE.Z and novelty by subject and by trait.
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(SI) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
# FIX: the model term is the pre-scaled column "SE.Z", not "SE";
# ggpredict() matches term names in the model, so "SE" is not found.
ggpredict(m, c("SE.Z", "novel", "SI")) %>% plot()
```

```{r}
# Ingroup choice as a function of z-scored self-evaluation (SE.Z),
# trained vs. held-out status, and Need to Belong, with random
# slopes for SE.Z and novelty by subject and by trait.
m <- glmer( as.factor(ingChoiceN) ~ SE.Z*as.factor(novel)*scale(NTB) + ( SE.Z+as.factor(novel) | subID) + ( SE.Z+as.factor(novel) | trait), data = fullTest, family = binomial, control = glmerControl(optimizer = "bobyqa",
                                    optCtrl = list(maxfun = 100000)),
    nAGQ = 1)
summary(m)
# FIX: the model term is the pre-scaled column "SE.Z", not "SE";
# ggpredict() matches term names in the model, so "SE" is not found.
ggpredict(m, c("SE.Z", "novel", "NTB")) %>% plot()
```


